From 982abc239efa69a808e4a2b4ecbb01126b253d8f Mon Sep 17 00:00:00 2001
From: Yahor Yuzefovich
Date: Wed, 18 Dec 2024 18:03:28 -0800
Subject: [PATCH 1/4] sql: fix including all related FK tables into the stmt bundle

Earlier this year in 084a7411b0ba5767506f48a4d580ee7d2e21fef2 we made a
change to include in the stmt bundle all tables referencing the queried
table (i.e. those that we have an inbound FK relationship with).
However, we forgot to include tables that the referencing table itself
depends on, so this commit fixes that oversight. Namely, we now include
all tables that we reference or that reference us via an FK
relationship, and this rule is applied recursively to every table under
consideration. (Previously, we didn't include inbound FKs when handling
an outbound FK, nor outbound FKs when handling an inbound FK.)

Note that I believe we could have avoided including referencing tables
(i.e. those with an inbound FK relationship) that only use NO ACTION or
RESTRICT actions in ON DELETE and ON UPDATE, but those cases aren't
very common, and including all "related" tables is unlikely to hurt.

The case of FK cycles is still not handled correctly during stmt bundle
recreation, but I think we have yet to run into one in a support
ticket.

Release note: None
---
 pkg/sql/explain_bundle_test.go | 17 ++++++---
 pkg/sql/opt/metadata.go        | 64 +++++++++++++++-------------------
 2 files changed, 41 insertions(+), 40 deletions(-)

diff --git a/pkg/sql/explain_bundle_test.go b/pkg/sql/explain_bundle_test.go
index 0baf88d1d4ba..381c4fec265b 100644
--- a/pkg/sql/explain_bundle_test.go
+++ b/pkg/sql/explain_bundle_test.go
@@ -276,11 +276,18 @@ CREATE TABLE users(id UUID DEFAULT gen_random_uuid() PRIMARY KEY, promo_id INT R
   })

   t.Run("foreign keys", func(t *testing.T) {
+    // All tables should be included in the stmt bundle, regardless of which
+    // one we query, because all of them are considered "related" (even
+    // though we don't specify ON DELETE and ON UPDATE actions).
+    tableNames := []string{"parent", "child1", "child2", "grandchild1", "grandchild2"}
     r.Exec(t, "CREATE TABLE parent (pk INT PRIMARY KEY, v INT);")
-    r.Exec(t, "CREATE TABLE child (pk INT PRIMARY KEY, fk INT REFERENCES parent(pk));")
+    r.Exec(t, "CREATE TABLE child1 (pk INT PRIMARY KEY, fk INT REFERENCES parent(pk));")
+    r.Exec(t, "CREATE TABLE child2 (pk INT PRIMARY KEY, fk INT REFERENCES parent(pk));")
+    r.Exec(t, "CREATE TABLE grandchild1 (pk INT PRIMARY KEY, fk INT REFERENCES child1(pk));")
+    r.Exec(t, "CREATE TABLE grandchild2 (pk INT PRIMARY KEY, fk INT REFERENCES child2(pk));")
     contentCheck := func(name, contents string) error {
       if name == "schema.sql" {
-        for _, tableName := range []string{"parent", "child"} {
+        for _, tableName := range tableNames {
           if regexp.MustCompile("CREATE TABLE defaultdb.public."+tableName).FindString(contents) == "" {
             return errors.Newf(
               "could not find 'CREATE TABLE defaultdb.public.%s' in schema.sql:\n%s", tableName, contents)
@@ -289,12 +296,12 @@
       }
       return nil
     }
-    for _, tableName := range []string{"parent", "child"} {
+    for _, tableName := range tableNames {
       rows := r.QueryStr(t, "EXPLAIN ANALYZE (DEBUG) SELECT * FROM "+tableName)
       checkBundle(
         t, fmt.Sprint(rows), "child", contentCheck, false, /* expectErrors */
-        base, plans, "stats-defaultdb.public.parent.sql", "stats-defaultdb.public.child.sql",
-        "distsql.html vec.txt vec-v.txt",
+        base, plans, "stats-defaultdb.public.parent.sql", "stats-defaultdb.public.child1.sql", "stats-defaultdb.public.child2.sql",
+        "stats-defaultdb.public.grandchild1.sql", "stats-defaultdb.public.grandchild2.sql", "distsql.html vec.txt vec-v.txt",
       )
     }
   })

diff --git a/pkg/sql/opt/metadata.go b/pkg/sql/opt/metadata.go
index d42a4dd5d76b..91863b77aab6 100644
--- a/pkg/sql/opt/metadata.go
+++ b/pkg/sql/opt/metadata.go
@@ -1007,49 +1007,43 @@ func (md *Metadata) getAllReferenceTables(
 	var tableSet intsets.Fast
 	var tableList []cat.DataSource
 	var addForeignKeyReferencedTables func(tab cat.Table)
+	var addForeignKeyReferencingTables func(tab cat.Table)
+	// handleRelatedTables is a helper function that processes the given table
+	// if it hasn't been handled yet by adding all referenced and referencing
+	// tables of the given one, including via transitive (recursive) FK
+	// relationships.
+	handleRelatedTables := func(tabID cat.StableID) {
+		if !tableSet.Contains(int(tabID)) {
+			tableSet.Add(int(tabID))
+			ds, _, err := catalog.ResolveDataSourceByID(ctx, cat.Flags{}, tabID)
+			if err != nil {
+				// This is a best-effort attempt to get all the tables, so don't
+				// error.
+				return
+			}
+			refTab, ok := ds.(cat.Table)
+			if !ok {
+				// This is a best-effort attempt to get all the tables, so don't
+				// error.
+				return
+			}
+			// We want to include all tables that we reference before adding
+			// ourselves, followed by all tables that reference us.
+			addForeignKeyReferencedTables(refTab)
+			tableList = append(tableList, ds)
+			addForeignKeyReferencingTables(refTab)
+		}
+	}
 	addForeignKeyReferencedTables = func(tab cat.Table) {
 		for i := 0; i < tab.OutboundForeignKeyCount(); i++ {
 			tabID := tab.OutboundForeignKey(i).ReferencedTableID()
-			if !tableSet.Contains(int(tabID)) {
-				tableSet.Add(int(tabID))
-				ds, _, err := catalog.ResolveDataSourceByID(ctx, cat.Flags{}, tabID)
-				if err != nil {
-					// This is a best-effort attempt to get all the tables, so don't error.
-					continue
-				}
-				refTab, ok := ds.(cat.Table)
-				if !ok {
-					// This is a best-effort attempt to get all the tables, so don't error.
-					continue
-				}
-				// We want to include all tables that we reference before adding
-				// ourselves.
-				addForeignKeyReferencedTables(refTab)
-				tableList = append(tableList, ds)
-			}
+			handleRelatedTables(tabID)
 		}
 	}
-	var addForeignKeyReferencingTables func(tab cat.Table)
 	addForeignKeyReferencingTables = func(tab cat.Table) {
 		for i := 0; i < tab.InboundForeignKeyCount(); i++ {
 			tabID := tab.InboundForeignKey(i).OriginTableID()
-			if !tableSet.Contains(int(tabID)) {
-				tableSet.Add(int(tabID))
-				ds, _, err := catalog.ResolveDataSourceByID(ctx, cat.Flags{}, tabID)
-				if err != nil {
-					// This is a best-effort attempt to get all the tables, so don't error.
-					continue
-				}
-				refTab, ok := ds.(cat.Table)
-				if !ok {
-					// This is a best-effort attempt to get all the tables, so don't error.
-					continue
-				}
-				// We want to include ourselves before all tables that reference
-				// us.
-				tableList = append(tableList, ds)
-				addForeignKeyReferencingTables(refTab)
-			}
+			handleRelatedTables(tabID)
 		}
 	}
 	for i := range md.tables {
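(Illustration only, not part of the patch: the traversal performed by the new handleRelatedTables helper can be sketched in isolation. The snippet below is a minimal sketch that uses a toy fkTable type and plain int IDs instead of the real cat.Table / cat.StableID / catalog.ResolveDataSourceByID APIs, and it starts from a single table rather than from every table in the metadata. It only demonstrates the visiting order: tables we reference come first, then the table itself, then tables that reference us, applied recursively with a visited set.)

package main

import "fmt"

// fkTable is a toy stand-in for cat.Table: it only knows which table IDs it
// references (outbound FKs) and which table IDs reference it (inbound FKs).
type fkTable struct {
    outbound []int // tables this table references via FKs
    inbound  []int // tables that reference this table via FKs
}

// relatedTables returns every table reachable from start over FK edges in
// either direction, visiting referenced tables before a table and
// referencing tables after it.
func relatedTables(start int, tables map[int]fkTable) []int {
    visited := make(map[int]bool)
    var order []int
    var handle func(id int)
    handle = func(id int) {
        if visited[id] {
            return
        }
        visited[id] = true
        tab, ok := tables[id]
        if !ok {
            // Best-effort: a table that cannot be resolved is skipped.
            return
        }
        // Tables that we reference come before us...
        for _, ref := range tab.outbound {
            handle(ref)
        }
        order = append(order, id)
        // ...and tables that reference us come after.
        for _, ref := range tab.inbound {
            handle(ref)
        }
    }
    handle(start)
    return order
}

func main() {
    // The schema from the updated test: parent is referenced by child1 and
    // child2, which in turn are referenced by grandchild1 and grandchild2.
    tables := map[int]fkTable{
        1: {inbound: []int{2, 3}},                  // parent
        2: {outbound: []int{1}, inbound: []int{4}}, // child1
        3: {outbound: []int{1}, inbound: []int{5}}, // child2
        4: {outbound: []int{2}},                    // grandchild1
        5: {outbound: []int{3}},                    // grandchild2
    }
    // Starting from grandchild1 alone still reaches all five tables exactly
    // once, with each parent printed before its children.
    fmt.Println(relatedTables(4, tables))
}

With this ordering, a bundle built for a query on grandchild1 also pulls in child1, parent, child2, and grandchild2, which is what the updated test asserts.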
From c06ba739c850a87317ac6845be7822c15e12a37b Mon Sep 17 00:00:00 2001
From: Yahor Yuzefovich
Date: Wed, 18 Dec 2024 20:46:48 -0800
Subject: [PATCH 2/4] roachtest: use correct warehouse number in import

In 0a948740dddb3fc1d3e77ecb0f1f0e8b0e505049 we introduced a bug that
hard-coded the tpcc import roachtests to always use 1 warehouse. This
is now fixed.

Release note: None
---
 pkg/cmd/roachtest/tests/import.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/cmd/roachtest/tests/import.go b/pkg/cmd/roachtest/tests/import.go
index 698e25b8585e..e745ed38570e 100644
--- a/pkg/cmd/roachtest/tests/import.go
+++ b/pkg/cmd/roachtest/tests/import.go
@@ -150,7 +150,7 @@ func registerImportTPCC(r registry.Registry) {
     } else {
       defer hc.Done()
     }
-    cmd := fmt.Sprintf(workloadStr, 1)
+    cmd := fmt.Sprintf(workloadStr, warehouses)
     // Tick once before starting the import, and once after to capture the
     // total elapsed time. This is used by roachperf to compute and display
     // the average MB/sec per node.
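(Illustration only: the shape of the bug. The workloadStr below is a hypothetical, simplified command template; the real one in import.go carries more flags. The point is that passing the literal 1 instead of the warehouses variable makes the configured warehouse count meaningless.)

package main

import "fmt"

func main() {
    // Hypothetical stand-in for the real workloadStr template in import.go.
    const workloadStr = "./cockroach workload fixtures import tpcc --warehouses=%d"

    warehouses := 1000

    // Before the fix: always imports 1 warehouse, whatever the test asked for.
    fmt.Println(fmt.Sprintf(workloadStr, 1))
    // After the fix: respects the configured warehouse count.
    fmt.Println(fmt.Sprintf(workloadStr, warehouses))
}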
From f09d9746b1ab8d3b55226488744c1e1598d25bdf Mon Sep 17 00:00:00 2001
From: Yahor Yuzefovich
Date: Wed, 18 Dec 2024 21:47:33 -0800
Subject: [PATCH 3/4] sql: allow CLOSE CURSOR in read-only txns

This is allowed in PG, so we now allow it too. The fix is similar to
what we did for DECLARE and FETCH in
50a799953f27e672e08006a791d32154f7221641.

Release note (bug fix): CLOSE CURSOR statements are now allowed in
read-only transactions, similar to Postgres. The bug has been present
since at least version 23.1.
---
 pkg/sql/logictest/testdata/logic_test/txn | 5 ++++-
 pkg/sql/sem/tree/stmt.go                  | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/pkg/sql/logictest/testdata/logic_test/txn b/pkg/sql/logictest/testdata/logic_test/txn
index b9069f0a6a94..85063ed9feb3 100644
--- a/pkg/sql/logictest/testdata/logic_test/txn
+++ b/pkg/sql/logictest/testdata/logic_test/txn
@@ -1520,13 +1520,16 @@ SET SESSION AUTHORIZATION DEFAULT
 statement ok
 BEGIN

-# DECLARE and FETCH CURSOR should work in a read-only txn.
+# DECLARE, FETCH, and CLOSE CURSOR should work in a read-only txn.
 statement ok
 DECLARE foo CURSOR FOR SELECT 1

 statement ok
 FETCH 1 foo

+statement ok
+CLOSE foo
+
 statement ok
 COMMIT

diff --git a/pkg/sql/sem/tree/stmt.go b/pkg/sql/sem/tree/stmt.go
index a5cdde453f9d..47bed5989f09 100644
--- a/pkg/sql/sem/tree/stmt.go
+++ b/pkg/sql/sem/tree/stmt.go
@@ -768,7 +768,7 @@ func (*CannedOptPlan) StatementTag() string { return "PREPARE AS OPT PLAN" }
 func (*CloseCursor) StatementReturnType() StatementReturnType { return Ack }

 // StatementType implements the Statement interface.
-func (*CloseCursor) StatementType() StatementType { return TypeDCL }
+func (*CloseCursor) StatementType() StatementType { return TypeDML }

 // StatementTag returns a short string identifying the type of statement.
 func (*CloseCursor) StatementTag() string { return "CLOSE" }
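(Illustration only: how a client observes the change. This is a rough sketch using database/sql; the driver choice and connection string are assumptions, not part of the patch. It mirrors the DECLARE/FETCH/CLOSE sequence from the logic test above, but runs it inside an explicitly read-only transaction, which is where CLOSE used to be rejected.)

package main

import (
    "context"
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq" // assumed driver; any pgwire-compatible driver works
)

func main() {
    ctx := context.Background()
    // Hypothetical connection string for a local single-node cluster.
    db, err := sql.Open("postgres", "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Open an explicit read-only transaction.
    tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
    if err != nil {
        log.Fatal(err)
    }
    defer tx.Rollback()

    if _, err := tx.ExecContext(ctx, "DECLARE foo CURSOR FOR SELECT 1"); err != nil {
        log.Fatal(err)
    }
    rows, err := tx.QueryContext(ctx, "FETCH 1 foo")
    if err != nil {
        log.Fatal(err)
    }
    for rows.Next() {
        var v int
        if err := rows.Scan(&v); err != nil {
            log.Fatal(err)
        }
        fmt.Println(v)
    }
    rows.Close()

    // Before this patch, the CLOSE below failed with a read-only transaction
    // error; with the fix it succeeds, matching Postgres.
    if _, err := tx.ExecContext(ctx, "CLOSE foo"); err != nil {
        log.Fatal(err)
    }
    if err := tx.Commit(); err != nil {
        log.Fatal(err)
    }
}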
From abbbbc21bf78b45699b96178bba85a7346311496 Mon Sep 17 00:00:00 2001
From: Marcus Gartner
Date: Thu, 19 Dec 2024 09:39:24 -0500
Subject: [PATCH 4/4] sql: remove unused dropOwnedByNode

`DROP OWNED BY` is supported only in the declarative schema changer and
`dropOwnedByNode` is not used, so it has been removed.

Release note: None
---
 pkg/sql/drop_owned_by.go | 15 ---------------
 pkg/sql/walk.go          |  1 -
 2 files changed, 16 deletions(-)

diff --git a/pkg/sql/drop_owned_by.go b/pkg/sql/drop_owned_by.go
index c5d99bda19c9..937ea90064af 100644
--- a/pkg/sql/drop_owned_by.go
+++ b/pkg/sql/drop_owned_by.go
@@ -9,17 +9,10 @@ import (
 	"context"

 	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
-	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 	"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
 	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
 )

-// dropOwnedByNode represents a DROP OWNED BY statement.
-type dropOwnedByNode struct {
-	// TODO(angelaw): Uncomment when implementing - commenting out due to linting error.
-	//n *tree.DropOwnedBy
-}
-
 func (p *planner) DropOwnedBy(ctx context.Context) (planNode, error) {
 	if err := checkSchemaChangeEnabled(
 		ctx,
@@ -32,11 +25,3 @@ func (p *planner) DropOwnedBy(ctx context.Context) (planNode, error) {
 	// TODO(angelaw): Implementation.
 	return nil, unimplemented.NewWithIssue(55381, "drop owned by is not yet implemented")
 }
-
-func (n *dropOwnedByNode) startExec(params runParams) error {
-	// TODO(angelaw): Implementation.
-	return nil
-}
-func (n *dropOwnedByNode) Next(runParams) (bool, error) { return false, nil }
-func (n *dropOwnedByNode) Values() tree.Datums { return tree.Datums{} }
-func (n *dropOwnedByNode) Close(context.Context) {}

diff --git a/pkg/sql/walk.go b/pkg/sql/walk.go
index c265786768f5..d53afe016026 100644
--- a/pkg/sql/walk.go
+++ b/pkg/sql/walk.go
@@ -432,7 +432,6 @@ var planNodeNames = map[reflect.Type]string{
 	reflect.TypeOf(&ordinalityNode{}): "ordinality",
 	reflect.TypeOf(&projectSetNode{}): "project set",
 	reflect.TypeOf(&reassignOwnedByNode{}): "reassign owned by",
-	reflect.TypeOf(&dropOwnedByNode{}): "drop owned by",
 	reflect.TypeOf(&recursiveCTENode{}): "recursive cte",
 	reflect.TypeOf(&refreshMaterializedViewNode{}): "refresh materialized view",
 	reflect.TypeOf(&relocateNode{}): "relocate",