ADBDEV-4633: Invalidate diskquota.table_size entries during startup #27

Merged
merged 6 commits on Nov 22, 2023
Changes from 3 commits
2 changes: 1 addition & 1 deletion src/diskquota.h
@@ -265,7 +265,7 @@ extern bool diskquota_hardlimit;
extern int SEGCOUNT;
extern int worker_spi_get_extension_version(int *major, int *minor);
extern void truncateStringInfo(StringInfo str, int nchars);
extern List *get_rel_oid_list(void);
extern List *get_rel_oid_list(bool is_init);
extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam);
extern Relation diskquota_relation_open(Oid relid);
extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname);
16 changes: 12 additions & 4 deletions src/diskquota_utility.c
@@ -113,7 +113,7 @@ static float4 get_per_segment_ratio(Oid spcoid);
static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio);
static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb);

List *get_rel_oid_list(void);
List *get_rel_oid_list(bool is_init);

/* ---- Help Functions to set quota limit. ---- */
/*
@@ -1294,17 +1294,25 @@ worker_spi_get_extension_version(int *major, int *minor)
* Get the list of oids of the tables which diskquota
* needs to care about in the database.
* Firstly the all the table oids which relkind is 'r'
* or 'm' and not system table.
* or 'm' and not system table. On init stage, oids from
* diskquota.table_size are added to invalidate them.
* Then, fetch the indexes of those tables.
*/

List *
get_rel_oid_list(void)
get_rel_oid_list(bool is_init)
{
List *oidlist = NIL;
int ret;

ret = SPI_execute_with_args("select oid from pg_class where oid >= $1 and (relkind='r' or relkind='m')", 1,
#define SELECT_FROM_PG_CATALOG_PG_CLASS \
"select oid from pg_catalog.pg_class where oid >= $1 and (relkind='r' or relkind='m')"
#define SELECT_FROM_DISKQUOTA_TABLE_SIZE "select tableid from diskquota.table_size where segid = -1"

ret = SPI_execute_with_args(is_init ? SELECT_FROM_PG_CATALOG_PG_CLASS
" union distinct " SELECT_FROM_DISKQUOTA_TABLE_SIZE
: SELECT_FROM_PG_CATALOG_PG_CLASS,
1,
(Oid[]){
OIDOID,
},
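For reference, when is_init is true the two macros above are concatenated with "union distinct", so the statement executed through SPI is effectively:

select oid from pg_catalog.pg_class where oid >= $1 and (relkind='r' or relkind='m')
 union distinct
select tableid from diskquota.table_size where segid = -1

Any oid that exists only in diskquota.table_size (for example, a table that was dropped while the cluster was down) therefore still reaches calculate_table_disk_usage(), which can then remove its stale rows. The $1 parameter is the same OIDOID argument as before, presumably the first non-system oid, so system catalogs stay excluded.
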
28 changes: 27 additions & 1 deletion src/quotamodel.c
@@ -247,6 +247,8 @@ static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag
static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag);
static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag);

static void delete_from_table_size_map(char *str);

/* add a new entry quota or update the old entry quota */
static void
update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid)
@@ -923,6 +925,10 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map)
TableEntryKey active_table_key;
List *oidlist;
ListCell *l;
int delete_entries_num = 0;
StringInfoData delete_statement;

initStringInfo(&delete_statement);

/*
* unset is_exist flag for tsentry in table_size_map this is used to
@@ -939,7 +945,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map)
* calculate the file size for active table and update namespace_size_map
* and role_size_map
*/
oidlist = get_rel_oid_list();
oidlist = get_rel_oid_list(is_init);

oidlist = merge_uncommitted_table_to_oidlist(oidlist);

@@ -973,6 +979,23 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map)
{
elog(WARNING, "cache lookup failed for relation %u", relOid);
LWLockRelease(diskquota_locks.relation_cache_lock);

if (!is_init) continue;

for (int i = -1; i < SEGCOUNT; i++)
{
appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", relOid, i);

delete_entries_num++;

if (delete_entries_num > SQL_MAX_VALUES_NUMBER)
{
delete_from_table_size_map(delete_statement.data);
resetStringInfo(&delete_statement);
delete_entries_num = 0;
}
}

continue;
}
relnamespace = relation_entry->namespaceoid;
@@ -1112,6 +1135,9 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map)
}
}

if (delete_entries_num) delete_from_table_size_map(delete_statement.data);

pfree(delete_statement.data);
list_free(oidlist);

/*
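The body of delete_from_table_size_map() is not part of this diff. Judging from the "(%u,%d)" pairs accumulated above and the SQL_MAX_VALUES_NUMBER batch limit, it presumably issues one DELETE per batch, along the lines of (a sketch, not the actual implementation; the oid values are purely illustrative):

delete from diskquota.table_size
 where (tableid, segid) in ( (16385,-1), (16385,0), (16385,1) )

The loop flushes a batch whenever it grows past SQL_MAX_VALUES_NUMBER and once more after the scan, so the statement never grows unbounded even when many dropped tables are discovered during init.
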
49 changes: 49 additions & 0 deletions tests/isolation2/expected/test_dropped.out
@@ -0,0 +1,49 @@
-- Ensure diskquota does not keep information about a dropped table across a cluster restart, by invalidating it at startup

1: CREATE SCHEMA dropped_schema;
CREATE
1: SET search_path TO dropped_schema;
SET
1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB');
set_schema_quota
------------------

(1 row)
1: SELECT diskquota.wait_for_worker_new_epoch();
wait_for_worker_new_epoch
---------------------------
t
(1 row)
1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id);
CREATE
1: INSERT INTO dropped_table SELECT generate_series(1, 100000);
INSERT 100000
-- Wait for the diskquota bgworker to refresh the size of 'dropped_table'.
1: SELECT diskquota.wait_for_worker_new_epoch();
wait_for_worker_new_epoch
---------------------------
t
(1 row)
1: DROP TABLE dropped_table;
DROP
1q: ... <quitting>

-- Restart the cluster in fast mode
!\retcode gpstop -afr;
-- start_ignore
-- end_ignore
(exited with code 0)

-- Check that the dropped table is no longer present in pg_catalog.pg_class
1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table';
oid
-----
(0 rows)
-- Check that diskquota.table_size has no entries that are missing from pg_catalog.pg_class
1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1;
tableid
---------
(0 rows)
1: DROP SCHEMA dropped_schema CASCADE;
DROP
1q: ... <quitting>
47 changes: 47 additions & 0 deletions tests/isolation2/expected/test_temporary.out
@@ -0,0 +1,47 @@
-- Ensure diskquota does not keep information about a temporary table across a cluster restart, by invalidating it at startup

1: CREATE SCHEMA temporary_schema;
CREATE
1: SET search_path TO temporary_schema;
SET
1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB');
set_schema_quota
------------------

(1 row)
1: SELECT diskquota.wait_for_worker_new_epoch();
wait_for_worker_new_epoch
---------------------------
t
(1 row)
1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id);
CREATE
1: INSERT INTO temporary_table SELECT generate_series(1, 100000);
INSERT 100000
-- Wait for the diskquota bgworker to refresh the size of 'temporary_table'.
1: SELECT diskquota.wait_for_worker_new_epoch();
wait_for_worker_new_epoch
---------------------------
t
(1 row)
1q: ... <quitting>

-- Restart the cluster in fast mode
!\retcode gpstop -afr;
-- start_ignore
-- end_ignore
(exited with code 0)

-- Check that the temporary table is no longer present in pg_catalog.pg_class
1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table';
oid
-----
(0 rows)
-- Check that diskquota.table_size has no entries that are missing from pg_catalog.pg_class
1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1;
tableid
---------
(0 rows)
1: DROP SCHEMA temporary_schema CASCADE;
DROP
1q: ... <quitting>
2 changes: 2 additions & 0 deletions tests/isolation2/isolation2_schedule
@@ -5,6 +5,8 @@ test: test_relation_size
test: test_rejectmap
test: test_vacuum
test: test_truncate
test: test_temporary
test: test_dropped
test: test_postmaster_restart
test: test_worker_timeout
test: test_per_segment_config
22 changes: 22 additions & 0 deletions tests/isolation2/sql/test_dropped.sql
@@ -0,0 +1,22 @@
-- Ensure diskquota does not keep information about a dropped table across a cluster restart, by invalidating it at startup

1: CREATE SCHEMA dropped_schema;
1: SET search_path TO dropped_schema;
1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB');
1: SELECT diskquota.wait_for_worker_new_epoch();
1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id);
1: INSERT INTO dropped_table SELECT generate_series(1, 100000);
-- Wait for the diskquota bgworker to refresh the size of 'dropped_table'.
1: SELECT diskquota.wait_for_worker_new_epoch();
1: DROP TABLE dropped_table;
1q:

-- Restart the cluster in fast mode
!\retcode gpstop -afr;

-- Check that the dropped table is no longer present in pg_catalog.pg_class
1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table';
-- Check that diskquota.table_size has no entries that are missing from pg_catalog.pg_class
1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1;
1: DROP SCHEMA dropped_schema CASCADE;
1q:
21 changes: 21 additions & 0 deletions tests/isolation2/sql/test_temporary.sql
@@ -0,0 +1,21 @@
-- Ensure diskquota does not keep information about a temporary table across a cluster restart, by invalidating it at startup

1: CREATE SCHEMA temporary_schema;
1: SET search_path TO temporary_schema;
1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB');
1: SELECT diskquota.wait_for_worker_new_epoch();
1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id);
1: INSERT INTO temporary_table SELECT generate_series(1, 100000);
-- Wait for the diskquota bgworker to refresh the size of 'temporary_table'.
1: SELECT diskquota.wait_for_worker_new_epoch();
1q:

-- Restart the cluster in fast mode
!\retcode gpstop -afr;

-- Check that the temporary table is no longer present in pg_catalog.pg_class
1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table';
-- Check that diskquota.table_size has no entries that are missing from pg_catalog.pg_class
1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1;
1: DROP SCHEMA temporary_schema CASCADE;
1q: