diff --git a/src/Makefile.am b/src/Makefile.am index 1a2eed9942..7f05cc6be0 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -123,11 +123,7 @@ main/StellarCoreVersion.cpp: always @if cmp -s $@~ $@; then rm -f $@~; else \ mv -f $@~ $@ && printf "echo '%s' > $@\n" "$$(cat $@)"; fi -if USE_POSTGRES -TESTS=test/selftest-pg -else # !USE_POSTGRES TESTS=test/selftest-nopg -endif # !USE_POSTGRES TESTS += test/check-nondet format: always diff --git a/src/bucket/readme.md b/src/bucket/readme.md index 29d4b81bd8..92f8f5dc77 100644 --- a/src/bucket/readme.md +++ b/src/bucket/readme.md @@ -38,12 +38,11 @@ receives a single large, atomic write whenever a ledger closes and many reads during consensus. There is never a write during a read, so all the reads occur on an effectively immutable database. This write once read many times paradigm is suited better for a LSM structure -(the bucket list) than SQL DBs. In particular, Postgres is ACID compliant and supports -transactions that can be rolled back. This introduces significant, unnecessary overhead. +(the bucket list) than SQL DBs. Since our access pattern never has conflicting reads and writes, ACID compliance is not required. Additionally, the current core implementation never rolls back a SQL transaction. Finally, all reads occur on an immutable database, opening the door for parallelism. -However, MySQL and Postgres do not treat the DB as immutable during reads +However, SQL DBs do not treat the DB as immutable during reads and cannot take advantage of this parallelism. By performing key-value lookup directly on the BucketList, we can remove ACID/rollback overhead, diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index 550438cac8..d6d1f8e568 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -987,10 +987,4 @@ TEST_CASE("bucket apply bench", "[bucketbench][!hide]") { runtest(Config::TESTDB_ON_DISK_SQLITE); } -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runtest(Config::TESTDB_POSTGRESQL); - } -#endif } diff --git a/src/database/Database.cpp b/src/database/Database.cpp index 8b259570bb..b50a18105a 100644 --- a/src/database/Database.cpp +++ b/src/database/Database.cpp @@ -38,9 +38,6 @@ #include #include -#ifdef USE_POSTGRES -#include -#endif #include #include #include @@ -75,31 +72,6 @@ static int const MIN_SQLITE_MINOR_VERSION = 45; static int const MIN_SQLITE_VERSION = (1000000 * MIN_SQLITE_MAJOR_VERSION) + (1000 * MIN_SQLITE_MINOR_VERSION); -// PostgreSQL pre-10.0 actually used its "minor number" as a major one -// (meaning: 9.4 and 9.5 were considered different major releases, with -// compatibility differences and so forth). After 10.0 they started doing -// what everyone else does, where 10.0 and 10.1 were only "minor". Either -// way though, we have a minimum minor version. -static int const MIN_POSTGRESQL_MAJOR_VERSION = 9; -static int const MIN_POSTGRESQL_MINOR_VERSION = 5; -static int const MIN_POSTGRESQL_VERSION = - (10000 * MIN_POSTGRESQL_MAJOR_VERSION) + - (100 * MIN_POSTGRESQL_MINOR_VERSION); - -#ifdef USE_POSTGRES -static std::string -badPgVersion(int vers) -{ - std::ostringstream msg; - int maj = (vers / 10000); - int min = (vers - (maj * 10000)) / 100; - msg << "PostgreSQL version " << maj << '.' << min - << " is too old, must use at least " << MIN_POSTGRESQL_MAJOR_VERSION - << '.' 
<< MIN_POSTGRESQL_MINOR_VERSION; - return msg.str(); -} -#endif - static std::string badSqliteVersion(int vers) { @@ -118,9 +90,6 @@ Database::registerDrivers() if (!gDriversRegistered) { register_factory_sqlite3(); -#ifdef USE_POSTGRES - register_factory_postgresql(); -#endif gDriversRegistered = true; } } @@ -165,20 +134,6 @@ class DatabaseConfigureSessionOp : public DatabaseTypeSpecificOperation // Register the sqlite carray() extension we use for bulk operations. sqlite3_carray_init(sq->conn_, nullptr, nullptr); } -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - int vers = PQserverVersion(pg->conn_); - if (vers < MIN_POSTGRESQL_VERSION) - { - throw std::runtime_error(badPgVersion(vers)); - } - mSession - << "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " - "SERIALIZABLE"; - } -#endif }; Database::Database(Application& app) @@ -381,18 +336,6 @@ Database::getUpsertTimer(std::string const& entityName) .TimeScope(); } -void -Database::setCurrentTransactionReadOnly() -{ - if (!isSqlite()) - { - auto prep = getPreparedStatement("SET TRANSACTION READ ONLY"); - auto& st = prep.statement(); - st.define_and_bind(); - st.execute(false); - } -} - bool Database::isSqlite() const { @@ -400,19 +343,6 @@ Database::isSqlite() const std::string::npos; } -std::string -Database::getSimpleCollationClause() const -{ - if (isSqlite()) - { - return ""; - } - else - { - return " COLLATE \"C\" "; - } -} - bool Database::canUsePool() const { diff --git a/src/database/Database.h b/src/database/Database.h index e3ad43b214..eeaefdcc53 100644 --- a/src/database/Database.h +++ b/src/database/Database.h @@ -64,10 +64,9 @@ class StatementContext : NonCopyable * Object that owns the database connection(s) that an application * uses to store the current ledger and other persistent state in. * - * This may represent an in-memory SQLite instance (for testing), an on-disk - * SQLite instance (for running a minimal, self-contained server) or a - * connection to a local Postgresql database, that the node operator must have - * set up on their own. + * This may represent an in-memory SQLite instance (for testing) or an on-disk + * SQLite instance (for running a minimal, self-contained server). * * Database connects, on construction, to the target specified by the * application Config object's Config::DATABASE value; this originates from the @@ -81,8 +80,7 @@ class StatementContext : NonCopyable * worker thread. * * All database connections and transactions are set to snapshot isolation level - * (SQL isolation level 'SERIALIZABLE' in Postgresql and Sqlite, neither of - * which provide true serializability). + * (SQL isolation level 'SERIALIZABLE' in Sqlite, which does not provide true serializability). */ class Database : NonMovableOrCopyable { @@ -134,21 +132,9 @@ class Database : NonMovableOrCopyable medida::TimerContext getUpdateTimer(std::string const& entityName); medida::TimerContext getUpsertTimer(std::string const& entityName); - // If possible (i.e. "on postgres") issue an SQL pragma that marks - // the current transaction as read-only. The effects of this last - // only as long as the current SQL transaction. - void setCurrentTransactionReadOnly(); - // Return true if the Database target is SQLite, otherwise false.
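As an aside: with the Postgres override gone, a DatabaseTypeSpecificOperation has only the SQLite backend left to visit. A minimal sketch of what a caller-defined operation now looks like; the operation name and the busy-timeout tweak are hypothetical, not taken from this patch:

    // Hypothetical sqlite-only visitor; sq->conn_ is the backend's raw SQLite
    // handle, the same one passed to sqlite3_carray_init above.
    class SetBusyTimeoutOp : public DatabaseTypeSpecificOperation<void>
    {
      public:
        void
        doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
        {
            // Fail fast on lock contention instead of blocking indefinitely.
            sqlite3_busy_timeout(sq->conn_, 1000 /* ms */);
        }
    };

Such an operation would be dispatched through Database::doDatabaseTypeSpecificOperation, the same way DatabaseConfigureSessionOp is above.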
bool isSqlite() const; - // Return an optional SQL COLLATION clause to use for text-typed columns in - // this database, in order to ensure they're compared "simply" using - // byte-value comparisons, i.e. in a non-language-sensitive fashion. For - // Postgresql this will be 'COLLATE "C"' and for SQLite, nothing (its - // defaults are correct already). - std::string getSimpleCollationClause() const; - // Call `op` back with the specific database backend subtype in use. template T doDatabaseTypeSpecificOperation(DatabaseTypeSpecificOperation& op); @@ -192,12 +178,6 @@ doDatabaseTypeSpecificOperation(soci::session& session, if (auto sq = dynamic_cast(b)) { return op.doSqliteSpecificOperation(sq); -#ifdef USE_POSTGRES - } - else if (auto pg = dynamic_cast(b)) - { - return op.doPostgresSpecificOperation(pg); -#endif } else { diff --git a/src/database/DatabaseTypeSpecificOperation.h b/src/database/DatabaseTypeSpecificOperation.h index 344d900ac6..462d691ad2 100644 --- a/src/database/DatabaseTypeSpecificOperation.h +++ b/src/database/DatabaseTypeSpecificOperation.h @@ -5,9 +5,6 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include -#ifdef USE_POSTGRES -#include -#endif // Just a visitor type to help write code that's database-specific. // See Database::doDatabaseTypeSpecificOperation. @@ -17,9 +14,5 @@ template class DatabaseTypeSpecificOperation { public: virtual T doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) = 0; -#ifdef USE_POSTGRES - virtual T - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) = 0; -#endif }; } diff --git a/src/database/test/DatabaseConnectionStringTest.cpp b/src/database/test/DatabaseConnectionStringTest.cpp index f810d0ef12..5de79de64a 100644 --- a/src/database/test/DatabaseConnectionStringTest.cpp +++ b/src/database/test/DatabaseConnectionStringTest.cpp @@ -19,124 +19,124 @@ TEST_CASE("remove password from database connection string", SECTION("password is removed if first") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://password=abc dbname=stellar)") == - R"(postgresql://password=******** dbname=stellar)"); + R"(sqlite3://password=abc dbname=stellar)") == + R"(sqlite3://password=******** dbname=stellar)"); } SECTION("password is removed if second") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password=dbname)") == - R"(postgresql://dbname=stellar password=********)"); + R"(sqlite3://dbname=stellar password=dbname)") == + R"(sqlite3://dbname=stellar password=********)"); } SECTION("database can be named password") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname=password password=dbname)") == - R"(postgresql://dbname=password password=********)"); + R"(sqlite3://dbname=password password=dbname)") == + R"(sqlite3://dbname=password password=********)"); } SECTION("quoted password is removed") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password='quoted password')") == - R"(postgresql://dbname=stellar password=********)"); + R"(sqlite3://dbname=stellar password='quoted password')") == + R"(sqlite3://dbname=stellar password=********)"); } SECTION("quoted password with quote is removed") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password='quoted \' password')") == - R"(postgresql://dbname=stellar password=********)"); + R"(sqlite3://dbname=stellar password='quoted \' password')") == + R"(sqlite3://dbname=stellar password=********)"); } SECTION("quoted password with 
backslash is removed") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password='quoted \\ password')") == - R"(postgresql://dbname=stellar password=********)"); + R"(sqlite3://dbname=stellar password='quoted \\ password')") == + R"(sqlite3://dbname=stellar password=********)"); } SECTION("quoted password with backslash and quote is removed") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password='quoted \\ password')") == - R"(postgresql://dbname=stellar password=********)"); + R"(sqlite3://dbname=stellar password='quoted \\ password')") == + R"(sqlite3://dbname=stellar password=********)"); } SECTION("parameters after password remain unchanged") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar password='quoted \\ password' performance='as fast as possible')") == - R"(postgresql://dbname=stellar password=******** performance='as fast as possible')"); + R"(sqlite3://dbname=stellar password='quoted \\ password' performance='as fast as possible')") == + R"(sqlite3://dbname=stellar password=******** performance='as fast as possible')"); } SECTION("dbname can be quored") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname='stellar with spaces' password='quoted \\ password' performance='as fast as possible')") == - R"(postgresql://dbname='stellar with spaces' password=******** performance='as fast as possible')"); + R"(sqlite3://dbname='stellar with spaces' password='quoted \\ password' performance='as fast as possible')") == + R"(sqlite3://dbname='stellar with spaces' password=******** performance='as fast as possible')"); } SECTION("spaces before equals are accepted") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname ='stellar with spaces' password ='quoted \\ password' performance ='as fast as possible')") == - R"(postgresql://dbname ='stellar with spaces' password =******** performance ='as fast as possible')"); + R"(sqlite3://dbname ='stellar with spaces' password ='quoted \\ password' performance ='as fast as possible')") == + R"(sqlite3://dbname ='stellar with spaces' password =******** performance ='as fast as possible')"); } SECTION("spaces after equals are accepted") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname= 'stellar with spaces' password= 'quoted \\ password' performance= 'as fast as possible')") == - R"(postgresql://dbname= 'stellar with spaces' password= ******** performance= 'as fast as possible')"); + R"(sqlite3://dbname= 'stellar with spaces' password= 'quoted \\ password' performance= 'as fast as possible')") == + R"(sqlite3://dbname= 'stellar with spaces' password= ******** performance= 'as fast as possible')"); } SECTION("spaces around equals are accepted") { REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname = 'stellar with spaces' password = 'quoted \\ password' performance = 'as fast as possible')") == - R"(postgresql://dbname = 'stellar with spaces' password = ******** performance = 'as fast as possible')"); + R"(sqlite3://dbname = 'stellar with spaces' password = 'quoted \\ password' performance = 'as fast as possible')") == + R"(sqlite3://dbname = 'stellar with spaces' password = ******** performance = 'as fast as possible')"); } SECTION( "invalid connection string without equals and value remains as it was") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname password=asbc)") == - R"(postgresql://dbname password=asbc)"); + R"(sqlite3://dbname password=asbc)") == + 
R"(sqlite3://dbname password=asbc)"); } SECTION("invalid connection string without value remains as it was") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname= password=asbc)") == - R"(postgresql://dbname= password=asbc)"); + R"(sqlite3://dbname= password=asbc)") == + R"(sqlite3://dbname= password=asbc)"); } SECTION("invalid connection string with unfinished quoted value") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname='quoted value)") == - R"(postgresql://dbname='quoted value)"); + R"(sqlite3://dbname='quoted value)") == + R"(sqlite3://dbname='quoted value)"); } SECTION("invalid connection string with quoted value with unfinished " "escape sequence") { REQUIRE(removePasswordFromConnectionString( - R"(postgresql://dbname='quoted value\ password=abc)") == - R"(postgresql://dbname='quoted value\ password=abc)"); + R"(sqlite3://dbname='quoted value\ password=abc)") == + R"(sqlite3://dbname='quoted value\ password=abc)"); } SECTION("invalid connection string without backend name") @@ -159,11 +159,11 @@ TEST_CASE("remove password from database connection string", // really need to allow '\S' or [^[:space:]]. This manifests as a match // failure -- and thereby leads to a failure-to-scrub -- when someone // writes /some/path/with/slashes as a bareword. This is legal as a - // token in a PostgreSQL connect string, but we failed to recognize it + // token in a sqlite3 connect string, but we failed to recognize it // as such before. REQUIRE( removePasswordFromConnectionString( - R"(postgresql://dbname=stellar user=stellar password=thisshouldbesecret host=/var/run/postgresql/)") == - R"(postgresql://dbname=stellar user=stellar password=******** host=/var/run/postgresql/)"); + R"(sqlite3://dbname=stellar user=stellar password=thisshouldbesecret host=/var/run/sqlite3/)") == + R"(sqlite3://dbname=stellar user=stellar password=******** host=/var/run/sqlite3/)"); } } diff --git a/src/database/test/DatabaseTests.cpp b/src/database/test/DatabaseTests.cpp index 2b05aca896..2079946a16 100644 --- a/src/database/test/DatabaseTests.cpp +++ b/src/database/test/DatabaseTests.cpp @@ -148,9 +148,7 @@ checkMVCCIsolation(Application::pointer app) { // Try to modify through sess2; this _would_ upgrade the read-lock // on the row or page in question to a write lock, but that would - // collide with tx1's write-lock via sess1, so it throws. On - // postgres - // this just blocks, so we only check on sqlite. + // collide with tx1's write-lock via sess1, so it throws. 
CLOG_DEBUG(Database, "Checking failure to upgrade read lock " "to conflicting write lock"); @@ -207,146 +205,6 @@ TEST_CASE("sqlite MVCC test", "[db]") checkMVCCIsolation(app); } -#ifdef USE_POSTGRES -TEST_CASE("postgres smoketest", "[db]") -{ - Config const& cfg = getTestConfig(0, Config::TESTDB_POSTGRESQL); - VirtualClock clock; - try - { - Application::pointer app = createTestApplication(clock, cfg); - int a = 10, b = 0; - - auto& session = app->getDatabase().getSession(); - - SECTION("round trip") - { - transactionTest(app); - } - - SECTION("blob storage") - { - soci::transaction tx(session); - std::vector x = {0, 1, 2, 3, 4, 5, 6}, y; - soci::blob blobX(session); - blobX.append(reinterpret_cast(x.data()), x.size()); - session << "drop table if exists test"; - session << "create table test (a integer, b oid)"; - session << "insert into test (a, b) values (:aa, :bb)", - soci::use(a, "aa"), soci::use(blobX, "bb"); - - soci::blob blobY(session); - session << "select a, b from test", soci::into(b), - soci::into(blobY); - y.resize(blobY.get_len()); - blobY.read(0, reinterpret_cast(y.data()), y.size()); - CHECK(x == y); - LOG_DEBUG(DEFAULT_LOG, - "blob round trip with postgresql database: {} == {}", - binToHex(x), binToHex(y)); - tx.commit(); - } - - SECTION("postgres MVCC test") - { - app->getDatabase().getSession() << "drop table if exists test"; - checkMVCCIsolation(app); - } - } - catch (soci::soci_error& err) - { - std::string what(err.what()); - - if (what.find("Cannot establish connection") != std::string::npos) - { - LOG_WARNING(DEFAULT_LOG, "Cannot connect to postgres server {}", - what); - } - else - { - LOG_ERROR(DEFAULT_LOG, "DB error: {}", what); - REQUIRE(0); - } - } -} - -TEST_CASE("postgres performance", "[db][pgperf][!hide]") -{ - Config cfg(getTestConfig(0, Config::TESTDB_POSTGRESQL)); - VirtualClock clock; - stellar::uniform_int_distribution dist; - - try - { - Application::pointer app = createTestApplication(clock, cfg); - auto& session = app->getDatabase().getSession(); - - session << "drop table if exists txtest;"; - session << "create table txtest (a bigint, b bigint, c bigint, primary " - "key (a, b));"; - - int64_t pk = 0; - int64_t sz = 10000; - int64_t div = 100; - - LOG_INFO(DEFAULT_LOG, "timing 10 inserts of {} rows", sz); - { - for (int64_t i = 0; i < 10; ++i) - { - soci::transaction sqltx(session); - for (int64_t j = 0; j < sz; ++j) - { - int64_t r = dist(gRandomEngine); - session << "insert into txtest (a,b,c) values (:a,:b,:c)", - soci::use(r), soci::use(pk), soci::use(j); - } - sqltx.commit(); - } - } - - LOG_INFO(DEFAULT_LOG, - "retiming 10 inserts of {} rows batched into {} " - "subtransactions of {} inserts each", - sz, sz / div, div); - soci::transaction sqltx(session); - for (int64_t i = 0; i < 10; ++i) - { - for (int64_t j = 0; j < sz / div; ++j) - { - soci::transaction subtx(session); - for (int64_t k = 0; k < div; ++k) - { - int64_t r = dist(gRandomEngine); - pk++; - session << "insert into txtest (a,b,c) values (:a,:b,:c)", - soci::use(r), soci::use(pk), soci::use(k); - } - subtx.commit(); - } - } - { - sqltx.commit(); - } - } - catch (soci::soci_error& err) - { - std::string what(err.what()); - - if (what.find("Cannot establish connection") != std::string::npos) - { - LOG_WARNING(DEFAULT_LOG, "Cannot connect to postgres server {}", - what); - } - else - { - LOG_ERROR(DEFAULT_LOG, "DB error: {}", what); - REQUIRE(0); - } - } -} - -#endif - TEST_CASE("schema test", "[db]") { Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_SQLITE); diff 
--git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp index 48dfe8eb8a..3ea9e2a617 100644 --- a/src/herder/test/HerderTests.cpp +++ b/src/herder/test/HerderTests.cpp @@ -3084,12 +3084,6 @@ TEST_CASE("SCP State", "[herder]") { configure(Config::TestDbMode::TESTDB_ON_DISK_SQLITE); } -#ifdef USE_POSTGRES - SECTION("postgres") - { - configure(Config::TestDbMode::TESTDB_POSTGRESQL); - } -#endif // add node0 and node1, in lockstep { SCPQuorumSet qSet; diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 7e4821f039..0e260abcb3 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -617,10 +617,6 @@ dbModeName(Config::TestDbMode mode) return "TESTDB_IN_MEMORY_SQLITE"; case Config::TESTDB_ON_DISK_SQLITE: return "TESTDB_ON_DISK_SQLITE"; -#ifdef USE_POSTGRES - case Config::TESTDB_POSTGRESQL: - return "TESTDB_POSTGRESQL"; -#endif default: abort(); } @@ -744,11 +740,6 @@ TEST_CASE("History catchup with different modes", std::vector dbModes = {Config::TESTDB_IN_MEMORY_SQLITE, Config::TESTDB_ON_DISK_SQLITE}; -#ifdef USE_POSTGRES - if (!force_sqlite) - dbModes.push_back(Config::TESTDB_POSTGRESQL); -#endif - for (auto dbMode : dbModes) { for (auto count : counts) diff --git a/src/ledger/LedgerHeaderUtils.cpp b/src/ledger/LedgerHeaderUtils.cpp index 372915d1f0..d6e3b00681 100644 --- a/src/ledger/LedgerHeaderUtils.cpp +++ b/src/ledger/LedgerHeaderUtils.cpp @@ -217,11 +217,9 @@ copyToStream(Database& db, soci::session& sess, uint32_t ledgerSeq, void dropAll(Database& db) { - std::string coll = db.getSimpleCollationClause(); - db.getSession() << "DROP TABLE IF EXISTS ledgerheaders;"; db.getSession() << "CREATE TABLE ledgerheaders (" - << "ledgerhash CHARACTER(64) " << coll + << "ledgerhash CHARACTER(64) " << " PRIMARY KEY," << "prevhash CHARACTER(64) NOT NULL," "bucketlisthash CHARACTER(64) NOT NULL," diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index a869193855..1e7e82097a 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -2747,8 +2747,8 @@ LedgerTxnRoot::Impl::commitChild(EntryIterator iter, TracyPlot("ledger.entry.commit", counter); // NB: we want to clear the prepared statement cache _before_ - // committing; on postgres this doesn't matter but on SQLite the passive - // WAL-auto-checkpointing-at-commit behaviour will starve if there are + // committing; on SQLite the passive WAL-auto-checkpointing-at-commit + // behaviour will starve if there are // still prepared statements open at commit time. 
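// For orientation (not part of this patch): a passive WAL checkpoint only
// copies frames that no still-open reader depends on, so prepared statements
// left open across the commit can keep it from making progress -- the
// starvation mentioned above. The commit-time auto-checkpoint is roughly the
// passive flavour of the explicit call:
//
//     int logFrames = 0, ckptFrames = 0;
//     sqlite3_wal_checkpoint_v2(handle, nullptr, SQLITE_CHECKPOINT_PASSIVE,
//                               &logFrames, &ckptFrames);
//
// where `handle` stands for the raw sqlite3* connection (hypothetical name).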
mApp.getDatabase().clearPreparedStatementCache(); ZoneNamedN(commitZone, "SOCI commit", true); diff --git a/src/ledger/LedgerTxnAccountSQL.cpp b/src/ledger/LedgerTxnAccountSQL.cpp index db51158f65..e73be8f1ea 100644 --- a/src/ledger/LedgerTxnAccountSQL.cpp +++ b/src/ledger/LedgerTxnAccountSQL.cpp @@ -281,88 +281,6 @@ class BulkUpsertAccountsOperation : public DatabaseTypeSpecificOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strBalances, strSeqNums, strSubEntryNums, - strInflationDests, strFlags, strHomeDomains, strThresholds, - strSigners, strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strBalances, mBalances); - marshalToPGArray(conn, strSeqNums, mSeqNums); - marshalToPGArray(conn, strSubEntryNums, mSubEntryNums); - marshalToPGArray(conn, strInflationDests, mInflationDests, - &mInflationDestInds); - marshalToPGArray(conn, strFlags, mFlags); - marshalToPGArray(conn, strHomeDomains, mHomeDomains); - marshalToPGArray(conn, strThresholds, mThresholds); - marshalToPGArray(conn, strSigners, mSigners, &mSignerInds); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions, &mExtensionInds); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - - std::string sql = "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::BIGINT[]), " - "unnest(:v2::BIGINT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]), " - "unnest(:v6::TEXT[]), " - "unnest(:v7::TEXT[]), " - "unnest(:v8::INT[]), " - "unnest(:v9::INT[]), " - "unnest(:v10::TEXT[]), " - "unnest(:v11::TEXT[]) " - ")" - "INSERT INTO accounts ( " - "accountid, balance, seqnum, " - "numsubentries, inflationdest, homedomain, " - "thresholds, signers, " - "flags, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid) DO UPDATE SET " - "balance = excluded.balance, " - "seqnum = excluded.seqnum, " - "numsubentries = excluded.numsubentries, " - "inflationdest = excluded.inflationdest, " - "homedomain = excluded.homedomain, " - "thresholds = excluded.thresholds, " - "signers = excluded.signers, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strBalances)); - st.exchange(soci::use(strSeqNums)); - st.exchange(soci::use(strSubEntryNums)); - st.exchange(soci::use(strInflationDests)); - st.exchange(soci::use(strHomeDomains)); - st.exchange(soci::use(strThresholds)); - st.exchange(soci::use(strSigners)); - st.exchange(soci::use(strFlags)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; class BulkDeleteAccountsOperation : public DatabaseTypeSpecificOperation @@ -412,31 +330,6 @@ class BulkDeleteAccountsOperation : public DatabaseTypeSpecificOperation doSociGenericOperation(); } -#ifdef USE_POSTGRES - void - 
doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - PGconn* conn = pg->conn_; - std::string strAccountIDs; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - std::string sql = - "WITH r AS (SELECT unnest(:ids::TEXT[])) " - "DELETE FROM accounts WHERE accountid IN (SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -471,12 +364,10 @@ LedgerTxnRoot::Impl::dropAccounts(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() << "CREATE TABLE accounts" << "(" - << "accountid VARCHAR(56) " << coll << " PRIMARY KEY," + << "accountid VARCHAR(56) PRIMARY KEY," << "balance BIGINT NOT NULL CHECK (balance >= 0)," "buyingliabilities BIGINT CHECK (buyingliabilities >= 0)," "sellingliabilities BIGINT CHECK (sellingliabilities >= 0)," @@ -492,12 +383,6 @@ LedgerTxnRoot::Impl::dropAccounts(bool rebuild) "extension TEXT," "ledgerext TEXT NOT NULL" ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE accounts " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\""; - } } } @@ -635,28 +520,6 @@ class BulkLoadAccountsOperation sqlite3_bind_int(st, 2, static_cast(accountIDcstrs.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - virtual std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT accountid, balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, flags, lastmodified, " - "extension, signers, ledgerext" - " FROM accounts " - "WHERE accountid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> diff --git a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp b/src/ledger/LedgerTxnClaimableBalanceSQL.cpp index e952589209..e37b6833e2 100644 --- a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp +++ b/src/ledger/LedgerTxnClaimableBalanceSQL.cpp @@ -116,25 +116,6 @@ class BulkLoadClaimableBalanceOperation sqlite3_bind_int(st, 2, static_cast(cstrBalanceIDs.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT balanceid, ledgerentry " - "FROM claimablebalance " - "WHERE balanceid IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> @@ -200,33 +181,6 @@ class BulkDeleteClaimableBalanceOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - 
marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM claimablebalance " - "WHERE balanceid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -301,46 +255,6 @@ class BulkUpsertClaimableBalanceOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs, strClaimableBalanceEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strBalanceIDs, mBalanceIDs); - marshalToPGArray(conn, strClaimableBalanceEntry, - mClaimableBalanceEntrys); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]))" - "INSERT INTO claimablebalance " - "(balanceid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (balanceid) DO UPDATE SET " - "balanceid = excluded.balanceid, ledgerentry = " - "excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.exchange(soci::use(strClaimableBalanceEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -362,10 +276,9 @@ LedgerTxnRoot::Impl::dropClaimableBalances(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); mApp.getDatabase().getSession() << "CREATE TABLE claimablebalance (" - << "balanceid VARCHAR(48) " << coll << " PRIMARY KEY, " + << "balanceid VARCHAR(48) PRIMARY KEY, " << "ledgerentry TEXT NOT NULL, " << "lastmodified INT NOT NULL);"; } diff --git a/src/ledger/LedgerTxnConfigSettingSQL.cpp b/src/ledger/LedgerTxnConfigSettingSQL.cpp index d06282e203..69cf412b60 100644 --- a/src/ledger/LedgerTxnConfigSettingSQL.cpp +++ b/src/ledger/LedgerTxnConfigSettingSQL.cpp @@ -120,25 +120,6 @@ class bulkLoadConfigSettingsOperation sqlite3_bind_int(st, 2, static_cast(mConfigSettingIDs.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strConfigSettingIDs; - marshalToPGArray(pg->conn_, strConfigSettingIDs, mConfigSettingIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::INT[])) " - "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> @@ -222,46 +203,6 @@ class bulkUpsertConfigSettingsOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - 
std::string strConfigSettingIDs, strConfigSettingEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strConfigSettingIDs, mConfigSettingIDs); - marshalToPGArray(conn, strConfigSettingEntries, mConfigSettingEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::INT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[])) " - "INSERT INTO configsettings " - "(configsettingid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (configsettingid) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - st.exchange(soci::use(strConfigSettingEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("configsetting"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mConfigSettingIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -283,11 +224,10 @@ LedgerTxnRoot::Impl::dropConfigSettings(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); mApp.getDatabase().getSession() << "CREATE TABLE configsettings (" << "configsettingid INT PRIMARY KEY, " - << "ledgerentry TEXT " << coll << " NOT NULL, " + << "ledgerentry TEXT NOT NULL, " << "lastmodified INT NOT NULL);"; } } diff --git a/src/ledger/LedgerTxnContractCodeSQL.cpp b/src/ledger/LedgerTxnContractCodeSQL.cpp index 0421e8996c..f6892f1b65 100644 --- a/src/ledger/LedgerTxnContractCodeSQL.cpp +++ b/src/ledger/LedgerTxnContractCodeSQL.cpp @@ -124,25 +124,6 @@ class BulkLoadContractCodeOperation sqlite3_bind_int(st, 2, static_cast(cStrHashes.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, mHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM contractcode " - "WHERE (hash) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> @@ -207,33 +188,6 @@ class BulkDeleteContractCodeOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, mHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM contractcode " - "WHERE hash IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -308,43 +262,6 @@ class BulkUpsertContractCodeOperation doSociGenericOperation(); } -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes, strContractCodeEntries, strLastModifieds; - - PGconn* conn = pg->conn_; - 
marshalToPGArray(conn, strHashes, mHashes); - marshalToPGArray(conn, strContractCodeEntries, mContractCodeEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:v1::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "INSERT INTO contractcode " - "(hash, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (hash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.exchange(soci::use(strContractCodeEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -362,24 +279,16 @@ LedgerTxnRoot::Impl::dropContractCode(bool rebuild) mEntryCache.clear(); mBestOffers.clear(); - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractcode;"; if (rebuild) { mApp.getDatabase().getSession() << "CREATE TABLE contractcode (" - << "hash TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " + << "hash TEXT NOT NULL, " + << "ledgerentry TEXT NOT NULL, " << "lastmodified INT NOT NULL, " << "PRIMARY KEY (hash));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractcode " - << "ALTER COLUMN hash " - << "TYPE TEXT COLLATE \"C\";"; - } } } diff --git a/src/ledger/LedgerTxnContractDataSQL.cpp b/src/ledger/LedgerTxnContractDataSQL.cpp index a7f716a561..4ab4136ec7 100644 --- a/src/ledger/LedgerTxnContractDataSQL.cpp +++ b/src/ledger/LedgerTxnContractDataSQL.cpp @@ -161,30 +161,6 @@ class BulkLoadContractDataOperation sqlite3_bind_int(st, 6, static_cast(mTypes.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "SELECT ledgerentry " - "FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> @@ -258,39 +234,6 @@ class BulkDeleteContractDataOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "DELETE FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - 
st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mContractIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -371,50 +314,6 @@ class BulkUpsertContractDataOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes, strContractDataEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strContractIDs, mContractIDs); - marshalToPGArray(conn, strKeys, mKeys); - marshalToPGArray(conn, strTypes, mTypes); - marshalToPGArray(conn, strContractDataEntries, mContractDataEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]), unnest(:v3::TEXT[]), unnest(:v4::INT[])) " - "INSERT INTO contractdata " - "(contractid, key, type, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (contractid,key,type) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - st.exchange(soci::use(strContractDataEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mContractIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -436,25 +335,14 @@ LedgerTxnRoot::Impl::dropContractData(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); mApp.getDatabase().getSession() << "CREATE TABLE contractdata (" - << "contractid TEXT " << coll << " NOT NULL, " - << "key TEXT " << coll << " NOT NULL, " + << "contractid TEXT NOT NULL, " + << "key TEXT NOT NULL, " << "type INT NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " + << "ledgerentry TEXT NOT NULL, " << "lastmodified INT NOT NULL, " << "PRIMARY KEY (contractid, key, type));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractdata " - << "ALTER COLUMN contractid " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN key " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN type " - << "TYPE INT;"; - } } } diff --git a/src/ledger/LedgerTxnDataSQL.cpp b/src/ledger/LedgerTxnDataSQL.cpp index a17a38b208..01227fd6c8 100644 --- a/src/ledger/LedgerTxnDataSQL.cpp +++ b/src/ledger/LedgerTxnDataSQL.cpp @@ -159,57 +159,6 @@ class BulkUpsertDataOperation : public DatabaseTypeSpecificOperation { doSociGenericOperation(); } -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strDataNames, strDataValues, - strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - marshalToPGArray(conn, strDataValues, mDataValues); - marshalToPGArray(conn, 
strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - std::string sql = - "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]) " - ")" - "INSERT INTO accountdata ( " - "accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid, dataname) DO UPDATE SET " - "datavalue = excluded.datavalue, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.exchange(soci::use(strDataValues)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; class BulkDeleteDataOperation : public DatabaseTypeSpecificOperation @@ -262,39 +211,6 @@ class BulkDeleteDataOperation : public DatabaseTypeSpecificOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - std::string strDataNames; - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - std::string sql = - "WITH r AS ( SELECT " - "unnest(:ids::TEXT[])," - "unnest(:v1::TEXT[])" - " ) " - "DELETE FROM accountdata WHERE (accountid, dataname) IN " - "(SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -328,27 +244,17 @@ LedgerTxnRoot::Impl::dropData(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); mApp.getDatabase().getSession() << "CREATE TABLE accountdata" << "(" - << "accountid VARCHAR(56) " << coll << " NOT NULL," - << "dataname VARCHAR(88) " << coll << " NOT NULL," + << "accountid VARCHAR(56) NOT NULL," + << "dataname VARCHAR(88) NOT NULL," << "datavalue VARCHAR(112) NOT NULL," "lastmodified INT NOT NULL," "extension TEXT," "ledgerext TEXT NOT NULL," "PRIMARY KEY (accountid, dataname)" ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() - << "ALTER TABLE accountdata " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\", " - << "ALTER COLUMN dataname " - << "TYPE VARCHAR(88) COLLATE \"C\""; - } } } @@ -461,31 +367,6 @@ class BulkLoadDataOperation sqlite3_bind_int(st, 4, static_cast(cstrDataNames.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - releaseAssert(mAccountIDs.size() == mDataNames.size()); 
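All of the bulk writers above now reach SQLite through their doSociGenericOperation() path, which this diff does not show. As a rough sketch of that style of statement for accountdata, reusing the member vectors from the removed Postgres code and omitting indicator vectors for the nullable extension column for brevity:

    std::string sql =
        "INSERT INTO accountdata "
        "(accountid, dataname, datavalue, lastmodified, extension, ledgerext) "
        "VALUES (:id, :v1, :v2, :v3, :v4, :v5) "
        "ON CONFLICT (accountid, dataname) DO UPDATE SET "
        "datavalue = excluded.datavalue, "
        "lastmodified = excluded.lastmodified, "
        "extension = excluded.extension, "
        "ledgerext = excluded.ledgerext";
    auto prep = mDB.getPreparedStatement(sql);
    soci::statement& st = prep.statement();
    // soci binds whole std::vectors, in effect running the insert once per
    // element when execute(true) is called.
    st.exchange(soci::use(mAccountIDs, "id"));
    st.exchange(soci::use(mDataNames, "v1"));
    st.exchange(soci::use(mDataValues, "v2"));
    st.exchange(soci::use(mLastModifieds, "v3"));
    st.exchange(soci::use(mExtensions, "v4"));
    st.exchange(soci::use(mLedgerExtensions, "v5"));
    st.define_and_bind();
    {
        auto timer = mDB.getUpsertTimer("data");
        st.execute(true);
    }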
- - std::string strAccountIDs; - std::string strDataNames; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - marshalToPGArray(pg->conn_, strDataNames, mDataNames); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[]), unnest(:v2::TEXT[])) " - "SELECT accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - "FROM accountdata WHERE (accountid, dataname) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index fcfe43e788..1d8b7fa712 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -10,12 +10,6 @@ #include "util/RandomEvictionCache.h" #include #include -#ifdef USE_POSTGRES -#include -#include -#include -#include -#endif namespace stellar { @@ -47,11 +41,9 @@ class EntryIterator::AbstractImpl // Helper struct to accumulate common cases that we can sift out of the // commit stream and perform in bulk (as single SQL statements per-type) // rather than making each insert/update/delete individually. This uses the -// postgres and sqlite-supported "ON CONFLICT"-style upserts, and uses -// soci's bulk operations where it can (i.e. for sqlite, or potentially -// others), and manually-crafted postgres unnest([array]) calls where it -// can't. This is not great, but it appears to be less work than -// reorganizing the relevant parts of soci. +// sqlite-supported "ON CONFLICT"-style upserts, and uses +// soci's bulk operations where it can. This is not great, but it appears +// to be less work than reorganizing the relevant parts of soci. class BulkLedgerEntryChangeAccumulator { @@ -1004,61 +996,4 @@ fromOpaqueBase64(T& res, std::string const& opaqueBase64) decoder::decode_b64(opaqueBase64, opaque); xdr::xdr_from_opaque(opaque, res); } - -#ifdef USE_POSTGRES -template -inline void -marshalToPGArrayItem(PGconn* conn, std::ostringstream& oss, const T& item) -{ - // NB: This setprecision is very important to ensuring that a double - // gets marshaled to enough decimal digits to reconstruct exactly the - // same double on the postgres side (that precision-level is exactly - // what max_digits10 is defined as). Do not remove it! 
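// Side note on the comment above (illustrative only): max_digits10 is the
// smallest decimal precision that makes a text round-trip of a double exact,
// e.g.
//
//     double d = 0.1;
//     std::ostringstream oss;
//     oss << std::setprecision(std::numeric_limits<double>::max_digits10) << d;
//     bool exact = (std::stod(oss.str()) == d); // true
//
// which is why the marshaling code being deleted here insisted on it.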
- oss << std::setprecision(std::numeric_limits::max_digits10) << item; -} - -template <> -inline void -marshalToPGArrayItem(PGconn* conn, std::ostringstream& oss, - const std::string& item) -{ - std::vector buf(item.size() * 2 + 1, '\0'); - int err = 0; - size_t len = - PQescapeStringConn(conn, buf.data(), item.c_str(), item.size(), &err); - if (err != 0) - { - throw std::runtime_error("Could not escape string in SQL"); - } - oss << '"'; - oss.write(buf.data(), len); - oss << '"'; -} - -template -inline void -marshalToPGArray(PGconn* conn, std::string& out, const std::vector& v, - const std::vector* ind = nullptr) -{ - std::ostringstream oss; - oss << '{'; - for (size_t i = 0; i < v.size(); ++i) - { - if (i > 0) - { - oss << ','; - } - if (ind && (*ind)[i] == soci::i_null) - { - oss << "NULL"; - } - else - { - marshalToPGArrayItem(conn, oss, v[i]); - } - } - oss << '}'; - out = oss.str(); -} -#endif } diff --git a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp b/src/ledger/LedgerTxnLiquidityPoolSQL.cpp index ce8289b284..37a5527902 100644 --- a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp +++ b/src/ledger/LedgerTxnLiquidityPoolSQL.cpp @@ -136,25 +136,6 @@ class BulkLoadLiquidityPoolOperation sqlite3_bind_int(st, 2, static_cast(cstrPoolAssets.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - return executeAndFetch(st); - } -#endif }; UnorderedMap> @@ -219,33 +200,6 @@ class BulkDeleteLiquidityPoolOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM liquiditypool " - "WHERE poolasset IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -330,53 +284,6 @@ class BulkUpsertLiquidityPoolOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets, strAssetAs, strAssetBs, - strLiquidityPoolEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strPoolAssets, mPoolAssets); - marshalToPGArray(conn, strAssetAs, mAssetAs); - marshalToPGArray(conn, strAssetBs, mAssetBs); - marshalToPGArray(conn, strLiquidityPoolEntry, mLiquidityPoolEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), unnest(:v3::TEXT[]), " - "unnest(:v4::INT[])) " - "INSERT INTO liquiditypool " - "(poolasset, asseta, assetb, ledgerentry, lastmodified) " - 
"SELECT * FROM r " - "ON CONFLICT (poolasset) DO UPDATE SET " - "asseta = excluded.asseta, " - "assetb = excluded.assetb, " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.exchange(soci::use(strAssetAs)); - st.exchange(soci::use(strAssetBs)); - st.exchange(soci::use(strLiquidityPoolEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -398,16 +305,15 @@ LedgerTxnRoot::Impl::dropLiquidityPools(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); // The primary key is poolasset (the base-64 opaque TrustLineAsset // containing the PoolID) instead of poolid (the base-64 opaque PoolID) // so that we can perform the join in load pool share trust lines by // account and asset. mApp.getDatabase().getSession() << "CREATE TABLE liquiditypool (" - << "poolasset TEXT " << coll << " PRIMARY KEY, " - << "asseta TEXT " << coll << " NOT NULL, " - << "assetb TEXT " << coll << " NOT NULL, " + << "poolasset TEXT PRIMARY KEY, " + << "asseta TEXT NOT NULL, " + << "assetb TEXT NOT NULL, " << "ledgerentry TEXT NOT NULL, " << "lastmodified INT NOT NULL);"; mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolasseta " diff --git a/src/ledger/LedgerTxnOfferSQL.cpp b/src/ledger/LedgerTxnOfferSQL.cpp index 6481bae9f7..e1663e9e37 100644 --- a/src/ledger/LedgerTxnOfferSQL.cpp +++ b/src/ledger/LedgerTxnOfferSQL.cpp @@ -471,89 +471,6 @@ class BulkUpsertOffersOperation : public DatabaseTypeSpecificOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - - std::string strSellerIDs, strOfferIDs, strSellingAssets, - strBuyingAssets, strAmounts, strPriceNs, strPriceDs, strPrices, - strFlags, strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strSellerIDs, mSellerIDs); - marshalToPGArray(conn, strOfferIDs, mOfferIDs); - - marshalToPGArray(conn, strSellingAssets, mSellingAssets); - marshalToPGArray(conn, strBuyingAssets, mBuyingAssets); - - marshalToPGArray(conn, strAmounts, mAmounts); - marshalToPGArray(conn, strPriceNs, mPriceNs); - marshalToPGArray(conn, strPriceDs, mPriceDs); - marshalToPGArray(conn, strPrices, mPrices); - marshalToPGArray(conn, strFlags, mFlags); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - - std::string sql = - "WITH r AS (SELECT " - "unnest(:v1::TEXT[]), " - "unnest(:v2::BIGINT[]), " - "unnest(:v3::TEXT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::BIGINT[]), " - "unnest(:v6::INT[]), " - "unnest(:v7::INT[]), " - "unnest(:v8::DOUBLE PRECISION[]), " - "unnest(:v9::INT[]), " - "unnest(:v10::INT[]), " - "unnest(:v11::TEXT[]), " - "unnest(:v12::TEXT[]) " - ")" - "INSERT INTO offers ( " - "sellerid, offerid, sellingasset, buyingasset, " - "amount, pricen, priced, price, flags, lastmodified, extension, " - "ledgerext " - ") SELECT * from r " - "ON CONFLICT (offerid) DO UPDATE SET " - "sellerid = excluded.sellerid, " - "sellingasset = 
excluded.sellingasset, " - "buyingasset = excluded.buyingasset, " - "amount = excluded.amount, " - "pricen = excluded.pricen, " - "priced = excluded.priced, " - "price = excluded.price, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strSellerIDs)); - st.exchange(soci::use(strOfferIDs)); - st.exchange(soci::use(strSellingAssets)); - st.exchange(soci::use(strBuyingAssets)); - st.exchange(soci::use(strAmounts)); - st.exchange(soci::use(strPriceNs)); - st.exchange(soci::use(strPriceDs)); - st.exchange(soci::use(strPrices)); - st.exchange(soci::use(strFlags)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("offer"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mOfferIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; class BulkDeleteOffersOperation : public DatabaseTypeSpecificOperation @@ -602,34 +519,6 @@ class BulkDeleteOffersOperation : public DatabaseTypeSpecificOperation { doSociGenericOperation(); } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - PGconn* conn = pg->conn_; - std::string strOfferIDs; - marshalToPGArray(conn, strOfferIDs, mOfferIDs); - std::string sql = "WITH r AS (SELECT " - "unnest(:ids::BIGINT[]) " - ") " - "DELETE FROM offers WHERE " - "offerid IN (SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strOfferIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("offer"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mOfferIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif }; void @@ -662,15 +551,14 @@ LedgerTxnRoot::Impl::dropOffers(bool rebuild) if (rebuild) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); mApp.getDatabase().getSession() << "CREATE TABLE offers" << "(" - << "sellerid VARCHAR(56) " << coll << "NOT NULL," + << "sellerid VARCHAR(56) NOT NULL," << "offerid BIGINT NOT NULL CHECK (offerid >= " "0)," - << "sellingasset TEXT " << coll << " NOT NULL," - << "buyingasset TEXT " << coll << " NOT NULL," + << "sellingasset TEXT NOT NULL," + << "buyingasset TEXT NOT NULL," << "amount BIGINT NOT NULL CHECK (amount >= 0)," "pricen INT NOT NULL," "priced INT NOT NULL," @@ -687,17 +575,6 @@ LedgerTxnRoot::Impl::dropOffers(bool rebuild) mApp.getDatabase().getSession() << "CREATE INDEX offerbyseller ON offers " "(sellerid);"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() - << "ALTER TABLE offers " - << "ALTER COLUMN sellerid " - << "TYPE VARCHAR(56) COLLATE \"C\", " - << "ALTER COLUMN buyingasset " - << "TYPE TEXT COLLATE \"C\", " - << "ALTER COLUMN sellingasset " - << "TYPE TEXT COLLATE \"C\""; - } } } @@ -805,26 +682,6 @@ class BulkLoadOffersOperation sqlite3_bind_int(st, 2, static_cast(mOfferIDs.size())); return executeAndFetch(prep.statement()); } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strOfferIDs; - marshalToPGArray(pg->conn_, strOfferIDs, mOfferIDs); 
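The surviving SQLite halves of these bulk loaders (the sqlite3_bind_* calls above) lean on the carray() table-valued function registered in DatabaseConfigureSessionOp. A rough sketch of that binding pattern for the offer load, with hypothetical locals rather than the literal code in LedgerTxnOfferSQL.cpp:

    std::string sql = "SELECT sellerid, offerid, sellingasset, buyingasset, "
                      "amount, pricen, priced, flags, lastmodified, extension, "
                      "ledgerext "
                      "FROM offers WHERE offerid IN carray(?, ?, 'int64')";
    // stmt is the raw sqlite3_stmt* behind the prepared statement;
    // mOfferIDs is a std::vector<int64_t>.
    sqlite3_bind_pointer(stmt, 1, (void*)mOfferIDs.data(), "carray", nullptr);
    sqlite3_bind_int(stmt, 2, static_cast<int>(mOfferIDs.size()));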
-
-        std::string sql =
-            "WITH r AS (SELECT unnest(:v1::BIGINT[])) "
-            "SELECT sellerid, offerid, sellingasset, buyingasset, "
-            "amount, pricen, priced, flags, lastmodified, extension, "
-            "ledgerext "
-            "FROM offers WHERE offerid IN (SELECT * FROM r)";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strOfferIDs));
-        return executeAndFetch(st);
-    }
-#endif
 };
 
 UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
diff --git a/src/ledger/LedgerTxnTTLSQL.cpp b/src/ledger/LedgerTxnTTLSQL.cpp
index 363923a14d..1b3766b418 100644
--- a/src/ledger/LedgerTxnTTLSQL.cpp
+++ b/src/ledger/LedgerTxnTTLSQL.cpp
@@ -123,25 +123,6 @@ class BulkLoadTTLOperation
         sqlite3_bind_int(st, 2, static_cast<int>(cStrKeyHashes.size()));
         return executeAndFetch(prep.statement());
     }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes;
-        marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "SELECT ledgerentry "
-                          "FROM ttl "
-                          "WHERE (keyHash) IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        return executeAndFetch(st);
-    }
-#endif
 };
 
 UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
@@ -204,33 +185,6 @@ class BulkDeleteTTLOperation : public DatabaseTypeSpecificOperation<void>
     {
         doSociGenericOperation();
     }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes;
-        marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "DELETE FROM ttl "
-                          "WHERE keyHash IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
 };
 
 void
@@ -303,44 +257,6 @@ class BulkUpsertTTLOperation : public DatabaseTypeSpecificOperation<void>
     {
         doSociGenericOperation();
     }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes, strTTLEntries, strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strKeyHashes, mKeyHashes);
-        marshalToPGArray(conn, strTTLEntries, mTTLEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS "
-                          "(SELECT unnest(:v1::TEXT[]), "
-                          "unnest(:v2::TEXT[]), unnest(:v3::INT[])) "
-                          "INSERT INTO ttl "
-                          "(keyHash, ledgerentry, lastmodified) "
-                          "SELECT * FROM r "
-                          "ON CONFLICT (keyhash) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        st.exchange(soci::use(strTTLEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
 };
 
 void
@@ -357,24 +273,16 @@ LedgerTxnRoot::Impl::dropTTL(bool rebuild)
     mEntryCache.clear();
     mBestOffers.clear();
 
-    std::string coll = mApp.getDatabase().getSimpleCollationClause();
-
     mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS ttl;";
 
     if (rebuild)
     {
         mApp.getDatabase().getSession()
             << "CREATE TABLE ttl ("
-            << "keyhash TEXT " << coll << " NOT NULL, "
-            << "ledgerentry TEXT " << coll << " NOT NULL, "
+            << "keyhash TEXT NOT NULL, "
+            << "ledgerentry TEXT NOT NULL, "
            << "lastmodified INT NOT NULL, "
            << "PRIMARY KEY (keyhash));";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession() << "ALTER TABLE ttl "
-                                            << "ALTER COLUMN keyhash "
-                                            << "TYPE TEXT COLLATE \"C\";";
-        }
    }
 }
diff --git a/src/ledger/LedgerTxnTrustLineSQL.cpp b/src/ledger/LedgerTxnTrustLineSQL.cpp
index 78631cd25a..16e8ec3a39 100644
--- a/src/ledger/LedgerTxnTrustLineSQL.cpp
+++ b/src/ledger/LedgerTxnTrustLineSQL.cpp
@@ -192,49 +192,6 @@ class BulkUpsertTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
     {
         doSociGenericOperation();
     }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        PGconn* conn = pg->conn_;
-
-        std::string strAccountIDs, strAssets, strTrustLineEntries,
-            strLastModifieds;
-
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        marshalToPGArray(conn, strTrustLineEntries, mTrustLineEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[]), "
-                          "unnest(:v2::TEXT[]), "
-                          "unnest(:v3::INT[])) "
-                          "INSERT INTO trustlines ( "
-                          "accountid, asset, ledgerEntry, lastmodified"
-                          ") SELECT * from r "
-                          "ON CONFLICT (accountid, asset) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.exchange(soci::use(strTrustLineEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
 };
 
 class BulkDeleteTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
@@ -293,37 +250,6 @@ class BulkDeleteTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
     {
         doSociGenericOperation();
     }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs, strAssets;
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[])"
-                          ") "
-                          "DELETE FROM trustlines WHERE "
-                          "(accountid, asset) IN (SELECT * FROM r)";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
 };
 
 void
@@ -359,12 +285,11 @@ LedgerTxnRoot::Impl::dropTrustLines(bool rebuild)
 
     if (rebuild)
     {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
         mApp.getDatabase().getSession()
            << "CREATE TABLE trustlines"
            << "("
-           << "accountid VARCHAR(56) " << coll << " NOT NULL,"
-           << "asset TEXT " << coll << " NOT NULL,"
+           << "accountid VARCHAR(56) NOT NULL,"
+           << "asset TEXT NOT NULL,"
            << "ledgerentry TEXT NOT NULL,"
            << "lastmodified INT NOT NULL,"
            << "PRIMARY KEY (accountid, asset));";
@@ -474,31 +399,6 @@ class BulkLoadTrustLinesOperation
         sqlite3_bind_int(st, 4, static_cast<int>(cstrAssets.size()));
         return executeAndFetch(prep.statement());
     }
-
-#ifdef USE_POSTGRES
-    virtual std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        releaseAssert(mAccountIDs.size() == mAssets.size());
-
-        std::string strAccountIDs;
-        std::string strAssets;
-        marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs);
-        marshalToPGArray(pg->conn_, strAssets, mAssets);
-
-        auto prep = mDb.getPreparedStatement(
-            "WITH r AS (SELECT unnest(:v1::TEXT[]), "
-            "unnest(:v2::TEXT[])) SELECT accountid, asset, "
-            "ledgerentry "
-            " FROM trustlines "
-            "WHERE (accountid, asset) IN (SELECT * "
-            "FROM r)");
-        auto& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        return executeAndFetch(st);
-    }
-#endif
 };
 
 UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp
index 3c00e67938..171bbfa476 100644
--- a/src/ledger/test/LedgerTxnTests.cpp
+++ b/src/ledger/test/LedgerTxnTests.cpp
@@ -319,13 +319,6 @@ TEST_CASE("LedgerTxn rollback into LedgerTxn", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
@@ -482,13 +475,6 @@
     {
         runTestWithDbMode(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTestWithDbMode(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn rollback and commit deactivate", "[ledgertxn]")
@@ -691,13 +677,6 @@ TEST_CASE("LedgerTxn createWithoutLoading and updateWithoutLoading",
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn erase", "[ledgertxn]")
@@ -781,13 +760,6 @@
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn eraseWithoutLoading", "[ledgertxn]")
@@ -876,13 +848,6 @@
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 static void
@@ -1330,13 +1295,6 @@ TEST_CASE("LedgerTxn loadHeader", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE_VERSIONS("LedgerTxn load", "[ledgertxn]")
@@ -1494,13 +1452,6 @@
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn loadWithoutRecord", "[ledgertxn]")
@@ -1839,13 +1790,6 @@ TEST_CASE("LedgerTxn loadAllOffers", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 static void
@@ -2258,13 +2202,6 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 static void
@@ -2668,13 +2605,6 @@ TEST_CASE("LedgerTxnRoot prefetch", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("Create performance benchmark", "[!hide][createbench]")
@@ -2731,14 +2661,6 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
         runTest(Config::TESTDB_ON_DISK_SQLITE, true);
         runTest(Config::TESTDB_ON_DISK_SQLITE, false);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL, true);
-        runTest(Config::TESTDB_POSTGRESQL, false);
-    }
-#endif
 }
 
 TEST_CASE("Erase performance benchmark", "[!hide][erasebench]")
@@ -2794,14 +2716,6 @@
         runTest(Config::TESTDB_ON_DISK_SQLITE, true);
         runTest(Config::TESTDB_ON_DISK_SQLITE, false);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL, true);
-        runTest(Config::TESTDB_POSTGRESQL, false);
-    }
-#endif
 }
 
 TEST_CASE("Bulk load batch size benchmark", "[!hide][bulkbatchsizebench]")
@@ -2859,13 +2773,6 @@
     {
         runTest(Config::TESTDB_ON_DISK_SQLITE);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("Signers performance benchmark", "[!hide][signersbench]")
@@ -3008,13 +2915,6 @@
     {
         runTests(Config::TESTDB_ON_DISK_SQLITE);
    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTests(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]")
@@ -3177,13 +3077,6 @@
                  getTimeSpent(*app, "write"), getTimeSpent(*app, "load"));
     };
 
-#ifdef USE_POSTGRES
-    SECTION("postgres")
-    {
-        runTest(Config::TESTDB_POSTGRESQL, 10, 5, 25000);
-    }
-#endif
-
     SECTION("sqlite")
     {
         runTest(Config::TESTDB_ON_DISK_SQLITE, 10, 5, 25000);
@@ -3596,13 +3489,6 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]")
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]")
@@ -3637,13 +3523,6 @@
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("Access deactivated entry", "[ledgertxn]")
@@ -3775,13 +3654,6 @@
     {
         runTest(Config::TESTDB_DEFAULT);
     }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
 }
 
 TEST_CASE("LedgerTxn generalized ledger entries", "[ledgertxn]")
diff --git a/src/main/Config.h b/src/main/Config.h
index 6c334bab6a..030c8e8e92 100644
--- a/src/main/Config.h
+++ b/src/main/Config.h
@@ -121,9 +121,6 @@ class Config : public std::enable_shared_from_this<Config>
         TESTDB_DEFAULT,
         TESTDB_IN_MEMORY_SQLITE,
         TESTDB_ON_DISK_SQLITE,
-#ifdef USE_POSTGRES
-        TESTDB_POSTGRESQL,
-#endif
         TESTDB_MODES
     };
diff --git a/src/overlay/PeerManager.cpp b/src/overlay/PeerManager.cpp
index 4abfbc946c..543778279d 100644
--- a/src/overlay/PeerManager.cpp
+++ b/src/overlay/PeerManager.cpp
@@ -95,10 +95,6 @@ PeerManager::loadRandomPeers(PeerQuery const& query, size_t size)
     // BATCH_SIZE should always be bigger, so it should win anyway
     size = std::max(size, BATCH_SIZE);
 
-    // if we ever start removing peers from db, we may need to enable this
-    // soci::transaction sqltx(mApp.getDatabase().getSession());
-    // mApp.getDatabase().setCurrentTransactionReadOnly();
-
     std::vector<std::string> conditions;
     if (query.mUseNextAttempt)
     {
diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp
index 6460364a99..981177289c 100644
--- a/src/simulation/CoreTests.cpp
+++ b/src/simulation/CoreTests.cpp
@@ -431,11 +431,7 @@ TEST_CASE(
 Application::pointer
 newLoadTestApp(VirtualClock& clock)
 {
-    Config cfg =
-#ifdef USE_POSTGRES
-        !force_sqlite ? getTestConfig(0, Config::TESTDB_POSTGRESQL) :
-#endif
-                        getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
     cfg.RUN_STANDALONE = false;
     // force maxTxSetSize to avoid throwing txSets on the floor during the first
     // ledger close
diff --git a/src/test/run-selftest-pg b/src/test/run-selftest-pg
deleted file mode 100755
index 28982a1dd9..0000000000
--- a/src/test/run-selftest-pg
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 Stellar Development Foundation and contributors. Licensed
-# under the Apache License, Version 2.0. See the COPYING file at the root
-# of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-BASE_INSTANCE="$1"
-TESTS="$2"
-if [[ -z "$TEMP_POSTGRES" ]]; then
-    TEMP_POSTGRES=1
-fi
-
-STELLAR_CORE_DEFAULT_TEST_PARAMS="--ll fatal -w NoTests -a -r simple"
-if [[ "$ALL_VERSIONS" != "" ]]; then
-    STELLAR_CORE_DEFAULT_TEST_PARAMS="$STELLAR_CORE_DEFAULT_TEST_PARAMS --all-versions"
-fi
-
-: ${STELLAR_CORE_TEST_PARAMS=$STELLAR_CORE_DEFAULT_TEST_PARAMS}
-
-PGDIRS=$(exec 2>/dev/null; cd /usr/lib/postgresql && ls -1d */bin | sort -rn | xargs realpath)
-
-findpg() {
-    local cmd="$1"
-    command -v "$cmd" > /dev/null && return
-    for dir in /usr/bin /usr/local/bin $PGDIRS; do
-        if [[ -x "$dir/$cmd" ]]; then
-            PATH="${PATH}:$dir"
-            return
-        fi
-    done
-}
-
-findpg psql
-PSQL=psql
-
-findpg pg_ctl
-PGCTL=pg_ctl
-
-if [ "$(which $PGCTL)" == "" ]; then
-    findpg pg_ctlcluster
-    PGCTL=pg_ctlcluster
-
-    if [ "$(which $PGCTL)" == "" ]; then
-        echo "Could not find pg_ctl or pg_ctlcluster"
-        exit 1
-    fi
-fi
-
-cleanup() {
-    $PGCTL stop -D $PGDATA -m immediate
-    rm -rf "$PGDATA"
-    exit
-}
-
-# Creates a temporary postgres database cluster, runs a command (or a
-# shell), then deletes the database cluster. If PGDATA is already
-# set, then simply executes a command without running another postgres
-# instance.
-runpg() {
-    export PGDATA=$(mktemp -d ${TMPDIR-/tmp}/pgtmp.XXXXXXXX)
-    export PGHOST="$PGDATA"
-    export PGUSER=postgres
-
-    trap cleanup 0 2 15
-
-    # pick a random utf locale if we can as to increase
-    # detection of encoding issues
-    DB_LOC=`locale -a | grep -i utf | sort -R | head -1`
-    if [ -z "$DB_LOC" ] ; then DB_LOC="--no-locale" ; else DB_LOC="--locale=$DB_LOC" ; fi
-
-    echo Creating temporary PostgreSQL database cluster in "$PGDATA" with $DB_LOC
-
-    $PGCTL init -s -o "$DB_LOC -U ${PGUSER-postgres} -A trust" \
-        || return 1
-    conf="$PGDATA/postgresql.conf"
-    usd=$(sed -ne '/#\(unix_socket_director[^ ]*\) =.*/{
-        s//\1/p
-        q
-    }' "$conf")
-    cat >> "$conf" < /dev/null
-}
-
-setup_test() {
-    runpg || return 1
-    for i in $(seq $BASE_INSTANCE $((BASE_INSTANCE+14))) ''; do
-        $PSQL -c "create database test$i;" &> /dev/null
-    done
-}
-
-if test "$TEMP_POSTGRES" != 0; then
-    if setup_test; then
-        echo "PostgreSQL enabled for tests using temporary database cluster"
-    else
-        echo "Could not enable PostgreSQL"
-        exit 1
-    fi
-else
-    echo "PostgreSQL enabled for tests using existing database cluster"
-fi
-
-./stellar-core test $STELLAR_CORE_TEST_PARAMS --base-instance $BASE_INSTANCE "$TESTS" 2> /dev/null
-R=$?
-if [[ $R -ne 0 ]] ; then
-    echo "Test failed, rerunning with debugger"
-    echo ./stellar-core test $STELLAR_CORE_TEST_PARAMS --base-instance $BASE_INSTANCE "$TESTS"
-    lldb -o 'r' -o 'bt' -o 'exit' -- ./stellar-core test $STELLAR_CORE_TEST_PARAMS --base-instance $BASE_INSTANCE "$TESTS"
-fi
-exit $R
diff --git a/src/test/selftest-pg b/src/test/selftest-pg
deleted file mode 100755
index b856805799..0000000000
--- a/src/test/selftest-pg
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 Stellar Development Foundation and contributors. Licensed
-# under the Apache License, Version 2.0. See the COPYING file at the root
-# of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-exec ./test/selftest-parallel ./test/run-selftest-pg
diff --git a/src/test/test.cpp b/src/test/test.cpp
index c99192aa9b..96fd8bec6c 100644
--- a/src/test/test.cpp
+++ b/src/test/test.cpp
@@ -198,7 +198,6 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
         // you can change this by enabling the appropriate line below
         mode = Config::TESTDB_IN_MEMORY_SQLITE;
         // mode = Config::TESTDB_ON_DISK_SQLITE;
-        // mode = Config::TESTDB_POSTGRESQL;
     }
     auto& cfgs = gTestCfg[mode];
     if (cfgs.size() <= static_cast<size_t>(instanceNumber))
@@ -286,11 +285,6 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
         case Config::TESTDB_ON_DISK_SQLITE:
             dbname << "sqlite3://" << rootDir << "test.db";
             break;
-#ifdef USE_POSTGRES
-        case Config::TESTDB_POSTGRESQL:
-            dbname << "postgresql://dbname=test" << instanceNumber;
-            break;
-#endif
         default:
             abort();
         }