From aff5d218e74ed75d22c8291fc24a907705f799a0 Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Mon, 12 Aug 2024 14:33:43 -0400
Subject: [PATCH 1/4] workload/schemachange: avoid commenting on transient
 check constraints

These check constraints are added temporarily while adding a non-nullable
column. We have seen flakes caused by the workload trying to refer to them.

Release note: None
---
 pkg/workload/schemachange/operation_generator.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pkg/workload/schemachange/operation_generator.go b/pkg/workload/schemachange/operation_generator.go
index 4523e65240b3..ea63cffef32d 100644
--- a/pkg/workload/schemachange/operation_generator.go
+++ b/pkg/workload/schemachange/operation_generator.go
@@ -2785,6 +2785,8 @@ func (og *operationGenerator) commentOn(ctx context.Context, tx pgx.Tx) (*opStmt
 			SELECT 'INDEX ' || quote_ident(schema_name) || '.' || quote_ident(table_name) || '@' || quote_ident("index"->>'name') FROM indexes
 			UNION ALL
 			SELECT 'CONSTRAINT ' || quote_ident("constraint"->>'name') || ' ON ' || quote_ident(schema_name) || '.' || quote_ident(table_name) FROM constraints
+			-- Avoid temporary CHECK constraints created while adding NOT NULL columns.
+			WHERE "constraint"->>'name' NOT LIKE '%%auto_not_null'
 		%s`, onType))

 	commentables, err := Collect(ctx, og, tx, pgx.RowTo[string], q)

From 55ed1c8e4b9550ae188470f7a8dfb62171132085 Mon Sep 17 00:00:00 2001
From: Naveen Setlur
Date: Mon, 12 Aug 2024 16:45:10 -0400
Subject: [PATCH 2/4] roachtest: fix ycsb workload to avoid column families

The update_heavy test had failures because the ycsb workload was using
column families, which aren't supported for LDR.

Release note: None

Fixes: #128740
---
 pkg/cmd/roachtest/tests/logical_data_replication.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/cmd/roachtest/tests/logical_data_replication.go b/pkg/cmd/roachtest/tests/logical_data_replication.go
index 3e842b17d01e..f12152be4fd8 100644
--- a/pkg/cmd/roachtest/tests/logical_data_replication.go
+++ b/pkg/cmd/roachtest/tests/logical_data_replication.go
@@ -44,7 +44,7 @@ type ycsbWorkload struct {
 }

 func (ycsb ycsbWorkload) sourceInitCmd(tenantName string, nodes option.NodeListOption) string {
-	cmd := roachtestutil.NewCommand(`./cockroach workload init ycsb`).
+	cmd := roachtestutil.NewCommand(`./cockroach workload init ycsb --families=false`).
 		MaybeFlag(ycsb.initRows > 0, "insert-count", ycsb.initRows).
 		MaybeFlag(ycsb.initSplits > 0, "splits", ycsb.initSplits).
 		Arg("{pgurl%s:%s}", nodes, tenantName)
@@ -52,7 +52,7 @@ func (ycsb ycsbWorkload) sourceRunCmd(tenantName string, nodes option.NodeListO
 }

 func (ycsb ycsbWorkload) sourceRunCmd(tenantName string, nodes option.NodeListOption) string {
-	cmd := roachtestutil.NewCommand(`./cockroach workload run ycsb`).
+	cmd := roachtestutil.NewCommand(`./cockroach workload run ycsb --families=false`).
 		Option("tolerate-errors").
 		Flag("workload", ycsb.workloadType).
 		MaybeFlag(ycsb.debugRunDuration > 0, "duration", ycsb.debugRunDuration).

From 0c84c138ce0149f82eccd5c74839c5f99786c876 Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Mon, 12 Aug 2024 14:44:39 -0400
Subject: [PATCH 3/4] workload/schemachange: only allow PK change in
 single-statement transactions

This will avoid errors of the form: "cannot perform other schema changes
in the same transaction as a primary key change."
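For illustration, with the legacy schema changer a multi-statement
transaction of the following shape can hit that error (the table and
column names here are hypothetical):

    BEGIN;
    ALTER TABLE t ALTER PRIMARY KEY USING COLUMNS (k);
    -- Any other schema change in the same transaction fails with:
    -- "cannot perform other schema changes in the same transaction as a primary key change"
    ALTER TABLE t ADD COLUMN extra INT;
    COMMIT;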
Release note: None
---
 pkg/workload/schemachange/operation_generator.go | 9 ++++++---
 pkg/workload/schemachange/schemachange.go        | 11 +++++------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/pkg/workload/schemachange/operation_generator.go b/pkg/workload/schemachange/operation_generator.go
index 4523e65240b3..4a71bfbf949b 100644
--- a/pkg/workload/schemachange/operation_generator.go
+++ b/pkg/workload/schemachange/operation_generator.go
@@ -182,7 +182,7 @@ func (og *operationGenerator) getSupportedDeclarativeOp(
 // stochastic attempts and if verbosity is >= 2 the unsuccessful attempts are
 // recorded in `log` to help with debugging of the workload.
 func (og *operationGenerator) randOp(
-	ctx context.Context, tx pgx.Tx, useDeclarativeSchemaChanger, allowDML bool,
+	ctx context.Context, tx pgx.Tx, useDeclarativeSchemaChanger bool, numOpsInTxn int,
 ) (stmt *opStmt, err error) {
 	for {
 		var op opType
@@ -195,8 +195,11 @@ func (og *operationGenerator) randOp(
 		} else {
 			op = opType(og.params.ops.Int())
 		}
-		if !allowDML && isDMLOpType(op) {
-			continue
+		if numOpsInTxn != 1 {
+			// DML and legacy PK changes are only allowed in single-statement transactions.
+			if isDMLOpType(op) || (op == alterTableAlterPrimaryKey && !useDeclarativeSchemaChanger) {
+				continue
+			}
 		}

 		og.resetOpState(useDeclarativeSchemaChanger)
diff --git a/pkg/workload/schemachange/schemachange.go b/pkg/workload/schemachange/schemachange.go
index f78515baf450..1488d80bb364 100644
--- a/pkg/workload/schemachange/schemachange.go
+++ b/pkg/workload/schemachange/schemachange.go
@@ -456,12 +456,12 @@ func (w *schemaChangeWorker) runInTxn(
 ) error {
 	w.logger.startLog(w.id)
 	w.logger.writeLog("BEGIN")
-	opsNum := 1 + w.opGen.randIntn(w.maxOpsPerWorker)
-	if useDeclarativeSchemaChanger && opsNum > w.workload.declarativeSchemaMaxStmtsPerTxn {
-		opsNum = w.workload.declarativeSchemaMaxStmtsPerTxn
+	numOps := 1 + w.opGen.randIntn(w.maxOpsPerWorker)
+	if useDeclarativeSchemaChanger && numOps > w.workload.declarativeSchemaMaxStmtsPerTxn {
+		numOps = w.workload.declarativeSchemaMaxStmtsPerTxn
 	}

-	for i := 0; i < opsNum; i++ {
+	for i := 0; i < numOps; i++ {
 		// Terminating this loop early if there are expected commit errors prevents unexpected commit behavior from being
 		// hidden by subsequent operations. Consider the case where there are expected commit errors.
 		// It is possible that committing the transaction now will fail the workload because the error does not occur
@@ -474,8 +474,7 @@ func (w *schemaChangeWorker) runInTxn(
 			break
 		}

-		// Only allow DML for single-statement transactions.
-		op, err := w.opGen.randOp(ctx, tx, useDeclarativeSchemaChanger, opsNum == 1)
+		op, err := w.opGen.randOp(ctx, tx, useDeclarativeSchemaChanger, numOps)
 		if pgErr := new(pgconn.PgError); errors.As(err, &pgErr) &&
 			pgcode.MakeCode(pgErr.Code) == pgcode.SerializationFailure {
 			return errors.Mark(err, errRunInTxnRbkSentinel)

From c4b1b74e8a7d84799ddc2f68fd5324ce04d35d25 Mon Sep 17 00:00:00 2001
From: Rafi Shamim
Date: Mon, 12 Aug 2024 17:06:34 -0400
Subject: [PATCH 4/4] schemachanger: remove unneeded function

fallBackIfDescColInRowLevelTTLTables is no longer needed as of
e5b30e4a2344a4b0ab1b2cf6a40b134192aeed3d. As the diff below shows, its
remaining body only looked up the row-level TTL element and returned, so
removing the function and its call site does not change behavior.
Release note: None
---
 .../scbuildstmt/alter_table_alter_primary_key.go | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
index 10dad9fb7580..f8ecf900a704 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
@@ -71,7 +71,6 @@ func alterPrimaryKey(b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t alterPri
 	fallBackIfShardedIndexExists(b, t, tbl.TableID)
 	fallBackIfPartitionedIndexExists(b, t, tbl.TableID)
 	fallBackIfRegionalByRowTable(b, t.n, tbl.TableID)
-	fallBackIfDescColInRowLevelTTLTables(b, tbl.TableID, t)
 	fallBackIfSubZoneConfigExists(b, t.n, tbl.TableID)

 	inflatedChain := getInflatedPrimaryIndexChain(b, tbl.TableID)
@@ -446,15 +445,6 @@ func fallBackIfRegionalByRowTable(b BuildCtx, t tree.NodeFormatter, tableID cati
 	}
 }

-// fallBackIfDescColInRowLevelTTLTables panics with an unimplemented
-// error if the table is a (row-level-ttl table && (it has a descending
-// key column || it has any inbound/outbound FK constraint)).
-func fallBackIfDescColInRowLevelTTLTables(b BuildCtx, tableID catid.DescID, t alterPrimaryKeySpec) {
-	if _, _, rowLevelTTLElem := scpb.FindRowLevelTTL(b.QueryByID(tableID)); rowLevelTTLElem == nil {
-		return
-	}
-}
-
 func mustRetrieveCurrentPrimaryIndexElement(
 	b BuildCtx, tableID catid.DescID,
 ) (res *scpb.PrimaryIndex) {