From 8b76bddc88ebdcfce1bd51152172d1026f307a67 Mon Sep 17 00:00:00 2001 From: alvrs Date: Wed, 31 Jul 2024 18:45:07 +0100 Subject: [PATCH 01/28] feat(store-sync): add util to fetch snapshot from dozer --- packages/store-sync/package.json | 8 +- packages/store-sync/src/dozer/common.ts | 30 ++++ .../src/dozer/fetchInitialBlockLogsDozer.ts | 84 +++++++++++ .../src/dozer/fetchRecordsDozerSql.test.ts | 136 ++++++++++++++++++ .../src/dozer/fetchRecordsDozerSql.ts | 69 +++++++++ packages/store-sync/src/dozer/index.ts | 4 + packages/store-sync/src/dozer/selectFrom.ts | 19 +++ packages/store-sync/src/index.ts | 1 + packages/store-sync/src/logToRecord.test.ts | 43 ++++++ packages/store-sync/src/logToRecord.ts | 26 ++++ packages/store-sync/src/recordToLog.test.ts | 52 +++++++ packages/store-sync/src/recordToLog.ts | 39 +++++ packages/store-sync/tsup.config.ts | 1 + 13 files changed, 511 insertions(+), 1 deletion(-) create mode 100644 packages/store-sync/src/dozer/common.ts create mode 100644 packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts create mode 100644 packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts create mode 100644 packages/store-sync/src/dozer/fetchRecordsDozerSql.ts create mode 100644 packages/store-sync/src/dozer/index.ts create mode 100644 packages/store-sync/src/dozer/selectFrom.ts create mode 100644 packages/store-sync/src/logToRecord.test.ts create mode 100644 packages/store-sync/src/logToRecord.ts create mode 100644 packages/store-sync/src/recordToLog.test.ts create mode 100644 packages/store-sync/src/recordToLog.ts diff --git a/packages/store-sync/package.json b/packages/store-sync/package.json index 878aa2306c..1e79d9405c 100644 --- a/packages/store-sync/package.json +++ b/packages/store-sync/package.json @@ -12,12 +12,14 @@ "exports": { ".": "./dist/index.js", "./indexer-client": "./dist/indexer-client/index.js", + "./dozer": "./dist/dozer/index.js", "./postgres": "./dist/postgres/index.js", "./postgres-decoded": 
"./dist/postgres-decoded/index.js", "./recs": "./dist/recs/index.js", "./sqlite": "./dist/sqlite/index.js", "./trpc-indexer": "./dist/trpc-indexer/index.js", - "./zustand": "./dist/zustand/index.js" + "./zustand": "./dist/zustand/index.js", + "./zustand-query": "./dist/zustand-query/index.js" }, "typesVersions": { "*": { @@ -27,6 +29,9 @@ "indexer-client": [ "./dist/indexer-client/index.d.ts" ], + "dozer": [ + "./dist/dozer/index.d.ts" + ], "postgres": [ "./dist/postgres/index.d.ts" ], @@ -72,6 +77,7 @@ "@latticexyz/schema-type": "workspace:*", "@latticexyz/store": "workspace:*", "@latticexyz/world": "workspace:*", + "@latticexyz/zustand-query": "workspace:*", "@trpc/client": "10.34.0", "@trpc/server": "10.34.0", "change-case": "^5.2.0", diff --git a/packages/store-sync/src/dozer/common.ts b/packages/store-sync/src/dozer/common.ts new file mode 100644 index 0000000000..d12b65bba5 --- /dev/null +++ b/packages/store-sync/src/dozer/common.ts @@ -0,0 +1,30 @@ +import { Table } from "@latticexyz/config"; +import { Hex } from "viem"; + +export type DozerTableQuery = { + table: Table; + /** + * SQL to filter the records of this table. + * The SQL result is expected to be of the same culumn shape as the table. + * Use the `selectFrom` helper to ensure the expected column shape. + * Note: requires an indexer with SQL API (ie. Dozer). + */ + sql: string; +}; + +export type DozerLogFilter = { + /** + * Filter logs by the table ID. + */ + table: Table; + /** + * Optionally filter by the `bytes32` value of the key in the first position (index zero of the record's key tuple). + */ + key0?: Hex; + /** + * Optionally filter by the `bytes32` value of the key in the second position (index one of the record's key tuple). 
+ */ + key1?: Hex; +}; + +export type DozerSyncFilter = DozerTableQuery | DozerLogFilter; diff --git a/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts b/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts new file mode 100644 index 0000000000..edf6e2e5c0 --- /dev/null +++ b/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts @@ -0,0 +1,84 @@ +import { DozerLogFilter, DozerSyncFilter, DozerTableQuery } from "./common"; +import { Hex } from "viem"; +import { StorageAdapterBlock, SyncFilter } from "../common"; +import { fetchRecordsDozerSql } from "./fetchRecordsDozerSql"; +import { recordToLog } from "../recordToLog"; +import { getSnapshot } from "../getSnapshot"; +import { bigIntMin } from "@latticexyz/common/utils"; + +export type FetchInitialBlockLogsDozerArgs = { + dozerUrl: string; + storeAddress: Hex; + filters?: DozerSyncFilter[]; + startBlock?: bigint; + chainId: number; +}; + +export type FetchInitialBlockLogsDozerResult = { + initialBlockLogs: StorageAdapterBlock; +}; + +export async function fetchInitialBlockLogsDozer({ + dozerUrl, + storeAddress, + filters, + startBlock = 0n, + chainId, +}: FetchInitialBlockLogsDozerArgs): Promise { + const initialBlockLogs: StorageAdapterBlock = { blockNumber: startBlock, logs: [] }; + + // We execute the list of provided SQL queries for hydration. For performance + // reasons the queries are not executed against a fixed block height, but against + // the latest state. We therefore pass the min block number of all query results + // as overall block number. This means some logs will be re-fetched again during + // the hydration process, but after the hydration is complete, the state will be + // correct. Intermediate state updates during hydration might be incorrect (for + // partial updates), so we only notify consumers of state updates after the + // initial hydration is complete. 
+ + const sqlFilters = filters && (filters.filter((filter) => "sql" in filter) as DozerTableQuery[]); + + // TODO: it might be more performant to execute individual sql queries separately than in one network request + // to parallelize on the backend (one request is expected to be execute against the same db state so it can't + // be parallelized). + const dozerTables = + sqlFilters && sqlFilters.length > 0 + ? await fetchRecordsDozerSql({ dozerUrl, storeAddress, queries: sqlFilters }) + : undefined; + + if (dozerTables) { + initialBlockLogs.blockNumber = dozerTables.blockHeight; + initialBlockLogs.logs = dozerTables.result.flatMap(({ table, records }) => + records.map((record) => recordToLog({ table, record, address: storeAddress })), + ); + } + + // Fetch the tables without SQL filter from the snapshot logs API for better performance. + const snapshotFilters = + filters && + filters + .filter((filter) => !("sql" in filter)) + .map((filter) => { + const { table, key0, key1 } = filter as DozerLogFilter; + return { tableId: table.tableId, key0, key1 } as SyncFilter; + }); + + const snapshot = + // If no filters are provided, the entire state is fetched + !snapshotFilters || snapshotFilters.length > 0 + ? await getSnapshot({ + chainId, + address: storeAddress, + filters: snapshotFilters, + indexerUrl: dozerUrl, + }) + : undefined; + + // The block number passed in the overall result will be the min of all queries and the snapshot. 
+ if (snapshot) { + initialBlockLogs.blockNumber = bigIntMin(initialBlockLogs.blockNumber, snapshot.blockNumber); + initialBlockLogs.logs = [...initialBlockLogs.logs, ...snapshot.logs]; + } + + return { initialBlockLogs }; +} diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts b/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts new file mode 100644 index 0000000000..3c65420e58 --- /dev/null +++ b/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts @@ -0,0 +1,136 @@ +import { describe, expect, it } from "vitest"; +import { fetchRecordsDozerSql } from "./fetchRecordsDozerSql"; +import mudConfig from "@latticexyz/world/mud.config"; +import { selectFrom } from "./selectFrom"; + +describe("fetch dozer sql", () => { + // TODO: set up CI test case for this (requires setting up dozer in CI) + it("should fetch dozer sql", async () => { + const result = await fetchRecordsDozerSql({ + dozerUrl: "https://redstone2.dozer.skystrife.xyz/q", + storeAddress: "0x9d05cc196c87104a7196fcca41280729b505dbbf", + queries: [ + selectFrom({ table: mudConfig.tables.world__Balances, where: '"balance" > 0', limit: 2 }), + selectFrom({ table: mudConfig.tables.world__FunctionSignatures, limit: 10 }), + ], + }); + + expect(result).toMatchInlineSnapshot(` + { + "blockHeight": 4909521n, + "result": [ + { + "records": [ + { + "balance": 308500000000000000n, + "namespaceId": "0x6e73000000000000000000000000000000000000000000000000000000000000", + }, + ], + "table": { + "codegen": { + "dataStruct": false, + "outputDirectory": "tables", + "storeArgument": false, + "tableIdArgument": false, + }, + "deploy": { + "disabled": false, + }, + "key": [ + "namespaceId", + ], + "label": "Balances", + "name": "Balances", + "namespace": "world", + "schema": { + "balance": { + "internalType": "uint256", + "type": "uint256", + }, + "namespaceId": { + "internalType": "ResourceId", + "type": "bytes32", + }, + }, + "tableId": 
"0x7462776f726c6400000000000000000042616c616e6365730000000000000000", + "type": "table", + }, + }, + { + "records": [ + { + "functionSelector": "0x0560912900000000000000000000000000000000000000000000000000000000", + "functionSignature": "unregisterStoreHook(bytes32,address)", + }, + { + "functionSelector": "0x0ba51f4900000000000000000000000000000000000000000000000000000000", + "functionSignature": "registerTable(bytes32,bytes32,bytes32,bytes32,string[],string[])", + }, + { + "functionSelector": "0x127de47a00000000000000000000000000000000000000000000000000000000", + "functionSignature": "createMatch(string,bytes32,bytes32,bytes32)", + }, + { + "functionSelector": "0x17902d6100000000000000000000000000000000000000000000000000000000", + "functionSignature": "createMatchSeasonPass(string,bytes32,bytes32,bytes32,bytes32,uint256,uint256[],bool)", + }, + { + "functionSelector": "0x1b9a91a400000000000000000000000000000000000000000000000000000000", + "functionSignature": "withdrawEth(address,uint256)", + }, + { + "functionSelector": "0x1d2257ba00000000000000000000000000000000000000000000000000000000", + "functionSignature": "registerDelegation(address,bytes32,bytes)", + }, + { + "functionSelector": "0x1fc595cd00000000000000000000000000000000000000000000000000000000", + "functionSignature": "setOfficial(bytes32,bool)", + }, + { + "functionSelector": "0x219adc2e00000000000000000000000000000000000000000000000000000000", + "functionSignature": "renounceOwnership(bytes32)", + }, + { + "functionSelector": "0x220ca1f600000000000000000000000000000000000000000000000000000000", + "functionSignature": "toggleReady(bytes32)", + }, + { + "functionSelector": "0x231bb4cd00000000000000000000000000000000000000000000000000000000", + "functionSignature": "createNewSeasonPass(bytes14,uint256,uint256,uint256,uint256,uint256,uint256,uint256)", + }, + ], + "table": { + "codegen": { + "dataStruct": false, + "outputDirectory": "tables", + "storeArgument": false, + "tableIdArgument": false, + }, + 
"deploy": { + "disabled": false, + }, + "key": [ + "functionSelector", + ], + "label": "FunctionSignatures", + "name": "FunctionSignatur", + "namespace": "world", + "schema": { + "functionSelector": { + "internalType": "bytes4", + "type": "bytes4", + }, + "functionSignature": { + "internalType": "string", + "type": "string", + }, + }, + "tableId": "0x6f74776f726c6400000000000000000046756e6374696f6e5369676e61747572", + "type": "offchainTable", + }, + }, + ], + } + `); + }); +}); diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts b/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts new file mode 100644 index 0000000000..6526d88ca6 --- /dev/null +++ b/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts @@ -0,0 +1,69 @@ +import { DecodeDozerRecordsResult, DozerQueryResult, decodeDozerRecords } from "@latticexyz/protocol-parser/internal"; +import { Hex } from "viem"; +import { DozerTableQuery } from "./common"; +import { Table } from "@latticexyz/config"; + +type DozerResponseSuccess = { + block_height: string; + result: DozerQueryResult[]; +}; + +type DozerResponseFail = { msg: string }; + +type DozerResponse = DozerResponseSuccess | DozerResponseFail; + +type FetchDozerSqlArgs = { + dozerUrl: string; + storeAddress: Hex; + queries: DozerTableQuery[]; +}; + +type FetchDozerSqlResult = + | { + blockHeight: bigint; + result: { + table: Table; + records: DecodeDozerRecordsResult; + }[]; + } + | undefined; + +function isDozerResponseFail(response: DozerResponse): response is DozerResponseFail { + return "msg" in response; +} + +export async function fetchRecordsDozerSql({ + dozerUrl, + queries, + storeAddress, +}: FetchDozerSqlArgs): Promise { + const response: DozerResponse = await ( + await fetch(dozerUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(queries.map((query) => ({ address: storeAddress, query: query.sql }))), + }) + ).json(); + + if (isDozerResponseFail(response)) { + 
console.warn(`Dozer response: ${response.msg}\n\nTry reproducing via cURL: + curl ${dozerUrl} \\ + --compressed \\ + -H 'Accept-Encoding: gzip' \\ + -H 'Content-Type: application/json' \\ + -d '[${queries.map((query) => `{"address": "${storeAddress}", "query": "${query.sql.replaceAll('"', '\\"')}"}`).join(",")}]'`); + return; + } + + const result: FetchDozerSqlResult = { + blockHeight: BigInt(response.block_height), + result: response.result.map((records, index) => ({ + table: queries[index].table, + records: decodeDozerRecords({ schema: queries[index].table.schema, records }), + })), + }; + + return result; +} diff --git a/packages/store-sync/src/dozer/index.ts b/packages/store-sync/src/dozer/index.ts new file mode 100644 index 0000000000..844a4308ff --- /dev/null +++ b/packages/store-sync/src/dozer/index.ts @@ -0,0 +1,4 @@ +export * from "./common"; +export * from "./fetchRecordsDozerSql"; +export * from "./selectFrom"; +export * from "./fetchInitialBlockLogsDozer"; diff --git a/packages/store-sync/src/dozer/selectFrom.ts b/packages/store-sync/src/dozer/selectFrom.ts new file mode 100644 index 0000000000..81b1d757e7 --- /dev/null +++ b/packages/store-sync/src/dozer/selectFrom.ts @@ -0,0 +1,19 @@ +import { Table } from "@latticexyz/config"; +import { DozerTableQuery } from "./common"; + +// For autocompletion but still allowing all SQL strings +export type Where = `"${keyof table["schema"] & string}"` | (string & {}); + +export type SelectFromArgs
= { table: table; where?: Where
; limit?: number }; + +export function selectFrom
({ table, where, limit }: SelectFromArgs
): DozerTableQuery { + const dozerTableLabel = table.namespace === "" ? table.name : `${table.namespace}__${table.name}`; + return { + table: table, + sql: `select ${Object.keys(table.schema) + .map((key) => `"${key}"`) + .join( + ", ", + )} from ${dozerTableLabel}${where != null ? ` where ${where}` : ""}${limit != null ? ` limit ${limit}` : ""}`, + }; +} diff --git a/packages/store-sync/src/index.ts b/packages/store-sync/src/index.ts index b69f95f5ce..de276bd499 100644 --- a/packages/store-sync/src/index.ts +++ b/packages/store-sync/src/index.ts @@ -6,3 +6,4 @@ export * from "./isTableRegistrationLog"; export * from "./logToTable"; export * from "./tablesWithRecordsToLogs"; export * from "./tableToLog"; +export * from "./recordToLog"; diff --git a/packages/store-sync/src/logToRecord.test.ts b/packages/store-sync/src/logToRecord.test.ts new file mode 100644 index 0000000000..fc2704e749 --- /dev/null +++ b/packages/store-sync/src/logToRecord.test.ts @@ -0,0 +1,43 @@ +/* eslint-disable max-len */ +import { describe, it, expect } from "vitest"; +import { defineTable } from "@latticexyz/store/config/v2"; +import { logToRecord } from "./logToRecord"; + +describe("logToRecord", () => { + it("should convert a Store_SetRecord log into a decoded table record", async () => { + const table = defineTable({ + label: "Test", + schema: { + key1: "uint32", + key2: "uint256", + value1: "address", + value2: "string", + }, + key: ["key1", "key2"], + }); + + const record = { + key1: 1, + key2: 2n, + value1: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", + value2: "hello", + } as const; + + const log = { + address: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", + args: { + dynamicData: "0x68656c6c6f", + encodedLengths: "0x0000000000000000000000000000000000000000000000000500000000000005", + keyTuple: [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + ], + staticData: 
"0x3aa5ebb10dc797cac828524e59a333d0a371443c", + tableId: "0x7462000000000000000000000000000054657374000000000000000000000000", + }, + eventName: "Store_SetRecord", + } as const; + + expect(logToRecord({ table, log })).toStrictEqual(record); + }); +}); diff --git a/packages/store-sync/src/logToRecord.ts b/packages/store-sync/src/logToRecord.ts new file mode 100644 index 0000000000..4ec8f08363 --- /dev/null +++ b/packages/store-sync/src/logToRecord.ts @@ -0,0 +1,26 @@ +import { + PartialTable, + SchemaToPrimitives, + decodeKey, + decodeValueArgs, + getKeySchema, + getSchemaTypes, + getValueSchema, +} from "@latticexyz/protocol-parser/internal"; +import { StorageAdapterLog } from "./common"; + +type LogToRecordArgs
= { + table: table; + log: StorageAdapterLog & { eventName: "Store_SetRecord" }; +}; + +export function logToRecord
({ + table, + log, +}: LogToRecordArgs
): SchemaToPrimitives> { + const keySchema = getSchemaTypes(getKeySchema(table)); + const valueSchema = getSchemaTypes(getValueSchema(table)); + const key = decodeKey(keySchema, log.args.keyTuple); + const value = decodeValueArgs(valueSchema, log.args); + return { ...key, ...value }; +} diff --git a/packages/store-sync/src/recordToLog.test.ts b/packages/store-sync/src/recordToLog.test.ts new file mode 100644 index 0000000000..3ddf07166d --- /dev/null +++ b/packages/store-sync/src/recordToLog.test.ts @@ -0,0 +1,52 @@ +/* eslint-disable max-len */ +import { describe, it, expect } from "vitest"; +import { recordToLog } from "./recordToLog"; +import { defineTable } from "@latticexyz/store/config/v2"; +import { logToRecord } from "./logToRecord"; + +describe("recordToLog", () => { + it("should convert table record into a Store_SetRecord log", async () => { + const table = defineTable({ + label: "Test", + schema: { + key1: "uint32", + key2: "uint256", + value1: "address", + value2: "string", + }, + key: ["key1", "key2"], + }); + + const record = { + key1: 1, + key2: 2n, + value1: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", + value2: "hello", + } as const; + + const log = recordToLog({ + address: "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", + table, + record, + }); + + expect(log).toMatchInlineSnapshot(` + { + "address": "0x3Aa5ebB10DC797CAC828524e59A333d0A371443c", + "args": { + "dynamicData": "0x68656c6c6f", + "encodedLengths": "0x0000000000000000000000000000000000000000000000000500000000000005", + "keyTuple": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + ], + "staticData": "0x3aa5ebb10dc797cac828524e59a333d0a371443c", + "tableId": "0x7462000000000000000000000000000054657374000000000000000000000000", + }, + "eventName": "Store_SetRecord", + } + `); + + expect(logToRecord({ table, log })).toStrictEqual(record); + }); +}); diff --git 
a/packages/store-sync/src/recordToLog.ts b/packages/store-sync/src/recordToLog.ts new file mode 100644 index 0000000000..55f027a6b1 --- /dev/null +++ b/packages/store-sync/src/recordToLog.ts @@ -0,0 +1,39 @@ +import { + SchemaToPrimitives, + encodeKey, + encodeValueArgs, + getKeySchema, + getSchemaTypes, + getValueSchema, + getKey, + getValue, + PartialTable, +} from "@latticexyz/protocol-parser/internal"; +import { StorageAdapterLog } from "./common"; +import { Table } from "@latticexyz/config"; +import { Hex } from "viem"; + +type RecordToLogArgs
= { + address: Hex; + table: table; + record: SchemaToPrimitives>; +}; + +export function recordToLog
({ + table, + record, + address, +}: RecordToLogArgs
): StorageAdapterLog & { eventName: "Store_SetRecord" } { + const keySchema = getSchemaTypes(getKeySchema(table)); + const valueSchema = getSchemaTypes(getValueSchema(table)); + + return { + eventName: "Store_SetRecord", + address: address, + args: { + tableId: table.tableId, + keyTuple: encodeKey(keySchema, getKey(table, record)), + ...encodeValueArgs(valueSchema, getValue(table, record)), + }, + }; +} diff --git a/packages/store-sync/tsup.config.ts b/packages/store-sync/tsup.config.ts index d8f6860bea..87447d624c 100644 --- a/packages/store-sync/tsup.config.ts +++ b/packages/store-sync/tsup.config.ts @@ -3,6 +3,7 @@ import { defineConfig } from "tsup"; export default defineConfig({ entry: [ "src/index.ts", + "src/dozer/index.ts", "src/sqlite/index.ts", "src/postgres/index.ts", "src/postgres-decoded/index.ts", From 52dd46b7daf7e8ab73dfeb7e52a37d2756de6d68 Mon Sep 17 00:00:00 2001 From: alvrs Date: Wed, 31 Jul 2024 18:49:02 +0100 Subject: [PATCH 02/28] add missing changes --- packages/protocol-parser/src/common.ts | 3 ++ .../src/decodeDozerField.test.ts | 9 ++++ .../protocol-parser/src/decodeDozerField.ts | 24 ++++++++++ .../src/decodeDozerRecords.test.ts | 38 ++++++++++++++++ .../protocol-parser/src/decodeDozerRecords.ts | 44 +++++++++++++++++++ .../protocol-parser/src/exports/internal.ts | 5 +++ packages/protocol-parser/src/getKey.test.ts | 25 +++++++++++ packages/protocol-parser/src/getKey.ts | 10 +++++ packages/protocol-parser/src/getKeySchema.ts | 8 ++-- packages/protocol-parser/src/getValue.test.ts | 25 +++++++++++ packages/protocol-parser/src/getValue.ts | 14 ++++++ packages/store-sync/package.json | 7 +-- 12 files changed, 203 insertions(+), 9 deletions(-) create mode 100644 packages/protocol-parser/src/decodeDozerField.test.ts create mode 100644 packages/protocol-parser/src/decodeDozerField.ts create mode 100644 packages/protocol-parser/src/decodeDozerRecords.test.ts create mode 100644 packages/protocol-parser/src/decodeDozerRecords.ts create mode 100644 
packages/protocol-parser/src/getKey.test.ts create mode 100644 packages/protocol-parser/src/getKey.ts create mode 100644 packages/protocol-parser/src/getValue.test.ts create mode 100644 packages/protocol-parser/src/getValue.ts diff --git a/packages/protocol-parser/src/common.ts b/packages/protocol-parser/src/common.ts index 904a646ef1..fa72916f82 100644 --- a/packages/protocol-parser/src/common.ts +++ b/packages/protocol-parser/src/common.ts @@ -1,3 +1,4 @@ +import { Table } from "@latticexyz/config"; import { DynamicAbiType, SchemaAbiType, @@ -51,3 +52,5 @@ export type ValueArgs = { encodedLengths: Hex; dynamicData: Hex; }; + +export type PartialTable = Pick; diff --git a/packages/protocol-parser/src/decodeDozerField.test.ts b/packages/protocol-parser/src/decodeDozerField.test.ts new file mode 100644 index 0000000000..6fb7cec67c --- /dev/null +++ b/packages/protocol-parser/src/decodeDozerField.test.ts @@ -0,0 +1,9 @@ +import { describe, expect, it } from "vitest"; +import { decodeDozerField } from "./decodeDozerField"; + +describe("decodeDozerField", () => { + it("should decode numbers to the expected value type", () => { + expect(decodeDozerField("uint48", "1")).toBe(1); + expect(decodeDozerField("uint56", "1")).toBe(1n); + }); +}); diff --git a/packages/protocol-parser/src/decodeDozerField.ts b/packages/protocol-parser/src/decodeDozerField.ts new file mode 100644 index 0000000000..0e499ae9bc --- /dev/null +++ b/packages/protocol-parser/src/decodeDozerField.ts @@ -0,0 +1,24 @@ +import { AbiType } from "@latticexyz/config"; +import { + ArrayAbiType, + SchemaAbiTypeToPrimitiveType, + arrayToStaticAbiType, + schemaAbiTypeToDefaultValue, +} from "@latticexyz/schema-type/internal"; + +export function decodeDozerField( + abiType: abiType, + data: string | boolean | string[], +): SchemaAbiTypeToPrimitiveType { + const defaultValueType = typeof schemaAbiTypeToDefaultValue[abiType]; + if (Array.isArray(data)) { + return data.map((element) => 
decodeDozerField(arrayToStaticAbiType(abiType as ArrayAbiType), element)) as never; + } + if (defaultValueType === "number") { + return Number(data) as never; + } + if (defaultValueType === "bigint") { + return BigInt(data) as never; + } + return data as never; +} diff --git a/packages/protocol-parser/src/decodeDozerRecords.test.ts b/packages/protocol-parser/src/decodeDozerRecords.test.ts new file mode 100644 index 0000000000..91d00b5542 --- /dev/null +++ b/packages/protocol-parser/src/decodeDozerRecords.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it } from "vitest"; +import { decodeDozerRecords } from "./decodeDozerRecords"; + +describe("decodeDozerRecord", () => { + const schema = { + address: { type: "address", internalType: "address" }, + uint256: { type: "uint256", internalType: "uint256" }, + uint32: { type: "uint32", internalType: "uint32" }, + bool: { type: "bool", internalType: "bool" }, + bytes: { type: "bytes", internalType: "bytes" }, + string: { type: "string", internalType: "string" }, + uint32Arr: { type: "uint32[]", internalType: "uint32[]" }, + } as const; + + it("decodes dozer record", () => { + const dozerRecord = [ + "0x0000000000000000000000000000000000000000", + "1234", + "1234", + true, + "0x1234", + "hello world", + ["1234", "5678"], + ]; + const decodedRecord = { + address: "0x0000000000000000000000000000000000000000", + uint256: 1234n, + uint32: 1234, + bool: true, + bytes: "0x1234", + string: "hello world", + uint32Arr: [1234, 5678], + }; + + const decoded = decodeDozerRecords({ schema, records: [dozerRecord] }); + expect(decoded).toStrictEqual([decodedRecord]); + }); +}); diff --git a/packages/protocol-parser/src/decodeDozerRecords.ts b/packages/protocol-parser/src/decodeDozerRecords.ts new file mode 100644 index 0000000000..822f3d354b --- /dev/null +++ b/packages/protocol-parser/src/decodeDozerRecords.ts @@ -0,0 +1,44 @@ +import { Schema } from "@latticexyz/config"; +import { decodeDozerField } from "./decodeDozerField"; 
+import { getSchemaPrimitives } from "./getSchemaPrimitives"; + +type DozerQueryHeader = string[]; +type DozerQueryRecord = (string | boolean | string[])[]; + +// First item in the result is the header +export type DozerQueryResult = [DozerQueryHeader, ...DozerQueryRecord[]]; + +export type DecodeDozerRecordsArgs = { + schema: Schema; + records: DozerQueryResult; +}; + +/** + * Trim the header row from the query result + */ +function trimHeader(result: DozerQueryResult): DozerQueryRecord[] { + return result.slice(1); +} + +export type DecodeDozerRecordsResult = getSchemaPrimitives[]; + +export function decodeDozerRecords({ + schema, + records, +}: DecodeDozerRecordsArgs): DecodeDozerRecordsResult { + const fieldNames = Object.keys(schema); + if (records.length > 0 && fieldNames.length !== records[0].length) { + throw new Error( + `Mismatch between schema and query result.\nSchema: [${fieldNames.join(", ")}]\nQuery result: [${records[0].join(", ")}]`, + ); + } + + return trimHeader(records).map((record) => + Object.fromEntries( + Object.keys(schema).map((fieldName, index) => [ + fieldName, + decodeDozerField(schema[fieldName].type, record[index]), + ]), + ), + ) as never; +} diff --git a/packages/protocol-parser/src/exports/internal.ts b/packages/protocol-parser/src/exports/internal.ts index 6f2d68af6b..603a0fd985 100644 --- a/packages/protocol-parser/src/exports/internal.ts +++ b/packages/protocol-parser/src/exports/internal.ts @@ -25,6 +25,11 @@ export * from "../schemaToHex"; export * from "../staticDataLength"; export * from "../valueSchemaToFieldLayoutHex"; export * from "../valueSchemaToHex"; +export * from "../decodeDozerField"; +export * from "../decodeDozerRecords"; + +export * from "../getKey"; +export * from "../getValue"; export * from "../getKeySchema"; export * from "../getValueSchema"; diff --git a/packages/protocol-parser/src/getKey.test.ts b/packages/protocol-parser/src/getKey.test.ts new file mode 100644 index 0000000000..46e50255d2 --- /dev/null 
+++ b/packages/protocol-parser/src/getKey.test.ts @@ -0,0 +1,25 @@ +import { describe, expect, it } from "vitest"; +import { getKey } from "./getKey"; + +describe("getKey", () => { + it("should return the key fields of the record", () => { + const table = { + schema: { + key1: { type: "uint32", internalType: "uint32" }, + key2: { type: "uint256", internalType: "uint256" }, + value1: { type: "string", internalType: "string" }, + value2: { type: "string", internalType: "string" }, + }, + key: ["key1", "key2"], + } as const; + const record = { key1: 1, key2: 2n, value1: "hello", value2: "world" }; + const key = getKey(table, record); + + expect(key).toMatchInlineSnapshot(` + { + "key1": 1, + "key2": 2n, + } + `); + }); +}); diff --git a/packages/protocol-parser/src/getKey.ts b/packages/protocol-parser/src/getKey.ts new file mode 100644 index 0000000000..b633363810 --- /dev/null +++ b/packages/protocol-parser/src/getKey.ts @@ -0,0 +1,10 @@ +import { getKeySchema } from "./getKeySchema"; +import { getSchemaPrimitives } from "./getSchemaPrimitives"; +import { PartialTable } from "./common"; + +export function getKey
( + table: table, + record: getSchemaPrimitives, +): getSchemaPrimitives> { + return Object.fromEntries(table.key.map((fieldName) => [fieldName, record[fieldName]])) as never; +} diff --git a/packages/protocol-parser/src/getKeySchema.ts b/packages/protocol-parser/src/getKeySchema.ts index 9f8bd452fa..4fab4a77bf 100644 --- a/packages/protocol-parser/src/getKeySchema.ts +++ b/packages/protocol-parser/src/getKeySchema.ts @@ -1,11 +1,13 @@ -import { Schema, Table } from "@latticexyz/config"; +import { KeySchema, StaticAbiType, Table } from "@latticexyz/config"; type PartialTable = Pick; export type getKeySchema
= PartialTable extends table - ? Schema + ? KeySchema : { - readonly [fieldName in Extract]: table["schema"][fieldName]; + readonly [fieldName in Extract]: table["schema"][fieldName] & { + type: StaticAbiType; + }; }; export function getKeySchema
(table: table): getKeySchema
{ diff --git a/packages/protocol-parser/src/getValue.test.ts b/packages/protocol-parser/src/getValue.test.ts new file mode 100644 index 0000000000..612270d26b --- /dev/null +++ b/packages/protocol-parser/src/getValue.test.ts @@ -0,0 +1,25 @@ +import { describe, expect, it } from "vitest"; +import { getValue } from "./getValue"; + +describe("getValue", () => { + it("should return the key fields of the record", () => { + const table = { + schema: { + key1: { type: "uint32", internalType: "uint32" }, + key2: { type: "uint256", internalType: "uint256" }, + value1: { type: "string", internalType: "string" }, + value2: { type: "string", internalType: "string" }, + }, + key: ["key1", "key2"], + } as const; + const record = { key1: 1, key2: 2n, value1: "hello", value2: "world" }; + const value = getValue(table, record); + + expect(value).toMatchInlineSnapshot(` + { + "value1": "hello", + "value2": "world", + } + `); + }); +}); diff --git a/packages/protocol-parser/src/getValue.ts b/packages/protocol-parser/src/getValue.ts new file mode 100644 index 0000000000..f4591ad6a1 --- /dev/null +++ b/packages/protocol-parser/src/getValue.ts @@ -0,0 +1,14 @@ +import { PartialTable } from "./common"; +import { getValueSchema } from "./getValueSchema"; +import { getSchemaPrimitives } from "./getSchemaPrimitives"; + +export function getValue
( + table: table, + record: getSchemaPrimitives, +): getSchemaPrimitives> { + return Object.fromEntries( + Object.keys(table.schema) + .filter((fieldName) => !table.key.includes(fieldName)) + .map((fieldName) => [fieldName, record[fieldName]]), + ) as never; +} diff --git a/packages/store-sync/package.json b/packages/store-sync/package.json index 1e79d9405c..92714ff2ba 100644 --- a/packages/store-sync/package.json +++ b/packages/store-sync/package.json @@ -18,8 +18,7 @@ "./recs": "./dist/recs/index.js", "./sqlite": "./dist/sqlite/index.js", "./trpc-indexer": "./dist/trpc-indexer/index.js", - "./zustand": "./dist/zustand/index.js", - "./zustand-query": "./dist/zustand-query/index.js" + "./zustand": "./dist/zustand/index.js" }, "typesVersions": { "*": { @@ -46,9 +45,6 @@ ], "trpc-indexer": [ "./dist/trpc-indexer/index.d.ts" - ], - "zustand": [ - "./dist/zustand/index.d.ts" ] } }, @@ -77,7 +73,6 @@ "@latticexyz/schema-type": "workspace:*", "@latticexyz/store": "workspace:*", "@latticexyz/world": "workspace:*", - "@latticexyz/zustand-query": "workspace:*", "@trpc/client": "10.34.0", "@trpc/server": "10.34.0", "change-case": "^5.2.0", From 121479d321adde6aa4f2872b683974e913752438 Mon Sep 17 00:00:00 2001 From: alvrs Date: Wed, 31 Jul 2024 18:49:33 +0100 Subject: [PATCH 03/28] skip test --- packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts b/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts index 3c65420e58..f4fc8cb4f5 100644 --- a/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts +++ b/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts @@ -5,7 +5,7 @@ import { selectFrom } from "./selectFrom"; describe("fetch dozer sql", () => { // TODO: set up CI test case for this (requires setting up dozer in CI) - it("should fetch dozer sql", async () => { + it.skip("should fetch dozer sql", async () => { const result = await 
fetchRecordsDozerSql({ dozerUrl: "https://redstone2.dozer.skystrife.xyz/q", storeAddress: "0x9d05cc196c87104a7196fcca41280729b505dbbf", From 9abfffc69937a01447d8a505a8703379742dcf27 Mon Sep 17 00:00:00 2001 From: alvrs Date: Wed, 31 Jul 2024 19:50:22 +0100 Subject: [PATCH 04/28] parallelize sql query requests --- .../src/dozer/fetchInitialBlockLogsDozer.ts | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts b/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts index edf6e2e5c0..c0b7337700 100644 --- a/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts +++ b/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts @@ -4,7 +4,7 @@ import { StorageAdapterBlock, SyncFilter } from "../common"; import { fetchRecordsDozerSql } from "./fetchRecordsDozerSql"; import { recordToLog } from "../recordToLog"; import { getSnapshot } from "../getSnapshot"; -import { bigIntMin } from "@latticexyz/common/utils"; +import { bigIntMin, isDefined } from "@latticexyz/common/utils"; export type FetchInitialBlockLogsDozerArgs = { dozerUrl: string; @@ -36,19 +36,19 @@ export async function fetchInitialBlockLogsDozer({ // partial updates), so we only notify consumers of state updates after the // initial hydration is complete. - const sqlFilters = filters && (filters.filter((filter) => "sql" in filter) as DozerTableQuery[]); + const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as DozerTableQuery[]) : []; - // TODO: it might be more performant to execute individual sql queries separately than in one network request - // to parallelize on the backend (one request is expected to be execute against the same db state so it can't - // be parallelized). - const dozerTables = - sqlFilters && sqlFilters.length > 0 - ? 
await fetchRecordsDozerSql({ dozerUrl, storeAddress, queries: sqlFilters }) - : undefined; + // Execute individual SQL queries as separate requests to parallelize on the backend. + // Each individual request is expected to be executed against the same db state so it + // can't be parallelized. + const dozerTables = ( + await Promise.all(sqlFilters.map((filter) => fetchRecordsDozerSql({ dozerUrl, storeAddress, queries: [filter] }))) + ).filter(isDefined); - if (dozerTables) { - initialBlockLogs.blockNumber = dozerTables.blockHeight; - initialBlockLogs.logs = dozerTables.result.flatMap(({ table, records }) => + if (dozerTables.length > 0) { + // Use the minimum block number of all query results as the block number to start syncing from. + initialBlockLogs.blockNumber = bigIntMin(...dozerTables.map((result) => result.blockHeight)); + initialBlockLogs.logs = dozerTables.flatMap(({ result: [{ table, records }] }) => records.map((record) => recordToLog({ table, record, address: storeAddress })), ); } From 367daec008d59c03337b6414bc1bde1d40c93d04 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 18:36:05 +0100 Subject: [PATCH 05/28] add KeySchema --- packages/config/src/common.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/config/src/common.ts b/packages/config/src/common.ts index 3577a2318d..31463c670c 100644 --- a/packages/config/src/common.ts +++ b/packages/config/src/common.ts @@ -23,6 +23,15 @@ export type Schema = { }; }; +export type KeySchema = { + readonly [fieldName: string]: { + /** the Solidity primitive ABI type */ + readonly type: StaticAbiType; + /** the user defined type or Solidity primitive ABI type */ + readonly internalType: string; + }; +}; + export type Table = { readonly label: string; readonly type: satisfy; From dc154d29c1a4833aace88518068965ecc45743e3 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 18:42:13 +0100 Subject: [PATCH 06/28] move protocol-parser changes to store-sync --- 
packages/protocol-parser/src/common.ts | 3 --- packages/protocol-parser/src/exports/internal.ts | 2 -- .../src => store-sync/src/dozer}/decodeDozerField.test.ts | 0 .../src => store-sync/src/dozer}/decodeDozerField.ts | 0 .../src => store-sync/src/dozer}/decodeDozerRecords.test.ts | 4 +++- .../src => store-sync/src/dozer}/decodeDozerRecords.ts | 2 +- packages/store-sync/src/dozer/fetchRecordsDozerSql.ts | 2 +- 7 files changed, 5 insertions(+), 8 deletions(-) rename packages/{protocol-parser/src => store-sync/src/dozer}/decodeDozerField.test.ts (100%) rename packages/{protocol-parser/src => store-sync/src/dozer}/decodeDozerField.ts (100%) rename packages/{protocol-parser/src => store-sync/src/dozer}/decodeDozerRecords.test.ts (94%) rename packages/{protocol-parser/src => store-sync/src/dozer}/decodeDozerRecords.ts (94%) diff --git a/packages/protocol-parser/src/common.ts b/packages/protocol-parser/src/common.ts index fa72916f82..904a646ef1 100644 --- a/packages/protocol-parser/src/common.ts +++ b/packages/protocol-parser/src/common.ts @@ -1,4 +1,3 @@ -import { Table } from "@latticexyz/config"; import { DynamicAbiType, SchemaAbiType, @@ -52,5 +51,3 @@ export type ValueArgs = { encodedLengths: Hex; dynamicData: Hex; }; - -export type PartialTable = Pick; diff --git a/packages/protocol-parser/src/exports/internal.ts b/packages/protocol-parser/src/exports/internal.ts index 603a0fd985..5b1a57505f 100644 --- a/packages/protocol-parser/src/exports/internal.ts +++ b/packages/protocol-parser/src/exports/internal.ts @@ -25,8 +25,6 @@ export * from "../schemaToHex"; export * from "../staticDataLength"; export * from "../valueSchemaToFieldLayoutHex"; export * from "../valueSchemaToHex"; -export * from "../decodeDozerField"; -export * from "../decodeDozerRecords"; export * from "../getKey"; export * from "../getValue"; diff --git a/packages/protocol-parser/src/decodeDozerField.test.ts b/packages/store-sync/src/dozer/decodeDozerField.test.ts similarity index 100% rename from 
packages/protocol-parser/src/decodeDozerField.test.ts rename to packages/store-sync/src/dozer/decodeDozerField.test.ts diff --git a/packages/protocol-parser/src/decodeDozerField.ts b/packages/store-sync/src/dozer/decodeDozerField.ts similarity index 100% rename from packages/protocol-parser/src/decodeDozerField.ts rename to packages/store-sync/src/dozer/decodeDozerField.ts diff --git a/packages/protocol-parser/src/decodeDozerRecords.test.ts b/packages/store-sync/src/dozer/decodeDozerRecords.test.ts similarity index 94% rename from packages/protocol-parser/src/decodeDozerRecords.test.ts rename to packages/store-sync/src/dozer/decodeDozerRecords.test.ts index 91d00b5542..614722a46d 100644 --- a/packages/protocol-parser/src/decodeDozerRecords.test.ts +++ b/packages/store-sync/src/dozer/decodeDozerRecords.test.ts @@ -13,6 +13,8 @@ describe("decodeDozerRecord", () => { } as const; it("decodes dozer record", () => { + const dozerHeader = Object.keys(schema); + const dozerRecord = [ "0x0000000000000000000000000000000000000000", "1234", @@ -32,7 +34,7 @@ describe("decodeDozerRecord", () => { uint32Arr: [1234, 5678], }; - const decoded = decodeDozerRecords({ schema, records: [dozerRecord] }); + const decoded = decodeDozerRecords({ schema, records: [dozerHeader, dozerRecord] }); expect(decoded).toStrictEqual([decodedRecord]); }); }); diff --git a/packages/protocol-parser/src/decodeDozerRecords.ts b/packages/store-sync/src/dozer/decodeDozerRecords.ts similarity index 94% rename from packages/protocol-parser/src/decodeDozerRecords.ts rename to packages/store-sync/src/dozer/decodeDozerRecords.ts index 822f3d354b..6a86e9339d 100644 --- a/packages/protocol-parser/src/decodeDozerRecords.ts +++ b/packages/store-sync/src/dozer/decodeDozerRecords.ts @@ -1,6 +1,6 @@ import { Schema } from "@latticexyz/config"; +import { getSchemaPrimitives } from "@latticexyz/protocol-parser/internal"; import { decodeDozerField } from "./decodeDozerField"; -import { getSchemaPrimitives } from 
"./getSchemaPrimitives"; type DozerQueryHeader = string[]; type DozerQueryRecord = (string | boolean | string[])[]; diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts b/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts index 6526d88ca6..63bcd82748 100644 --- a/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts +++ b/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts @@ -1,4 +1,4 @@ -import { DecodeDozerRecordsResult, DozerQueryResult, decodeDozerRecords } from "@latticexyz/protocol-parser/internal"; +import { DecodeDozerRecordsResult, DozerQueryResult, decodeDozerRecords } from "./decodeDozerRecords"; import { Hex } from "viem"; import { DozerTableQuery } from "./common"; import { Table } from "@latticexyz/config"; From bf885cc1e0a360fa48192af90c76aed3cff355f7 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 18:44:59 +0100 Subject: [PATCH 07/28] move KeySchema --- packages/config/src/common.ts | 9 --------- packages/protocol-parser/src/getKeySchema.ts | 11 ++++++++++- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/config/src/common.ts b/packages/config/src/common.ts index 31463c670c..3577a2318d 100644 --- a/packages/config/src/common.ts +++ b/packages/config/src/common.ts @@ -23,15 +23,6 @@ export type Schema = { }; }; -export type KeySchema = { - readonly [fieldName: string]: { - /** the Solidity primitive ABI type */ - readonly type: StaticAbiType; - /** the user defined type or Solidity primitive ABI type */ - readonly internalType: string; - }; -}; - export type Table = { readonly label: string; readonly type: satisfy; diff --git a/packages/protocol-parser/src/getKeySchema.ts b/packages/protocol-parser/src/getKeySchema.ts index 4fab4a77bf..9023a14aa6 100644 --- a/packages/protocol-parser/src/getKeySchema.ts +++ b/packages/protocol-parser/src/getKeySchema.ts @@ -1,7 +1,16 @@ -import { KeySchema, StaticAbiType, Table } from "@latticexyz/config"; +import { StaticAbiType, Table } from 
"@latticexyz/config"; type PartialTable = Pick; +type KeySchema = { + readonly [fieldName: string]: { + /** the Solidity primitive ABI type */ + readonly type: StaticAbiType; + /** the user defined type or Solidity primitive ABI type */ + readonly internalType: string; + }; +}; + export type getKeySchema
= PartialTable extends table ? KeySchema : { From aca18b9105eb37c8f94f6b9a68947d020f327239 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 18:46:40 +0100 Subject: [PATCH 08/28] keep zustand types --- packages/store-sync/package.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/store-sync/package.json b/packages/store-sync/package.json index 92714ff2ba..86d9da6054 100644 --- a/packages/store-sync/package.json +++ b/packages/store-sync/package.json @@ -45,6 +45,9 @@ ], "trpc-indexer": [ "./dist/trpc-indexer/index.d.ts" + ], + "zustand": [ + "./dist/zustand/index.d.ts" ] } }, From b36d6fe87674395d11514d6d82f0e8c0d97cfca9 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 19:06:41 +0100 Subject: [PATCH 09/28] rename things --- packages/store-sync/src/dozer/common.ts | 6 +-- ...zerSql.test.ts => fetchRecordsSql.test.ts} | 6 +-- ...hRecordsDozerSql.ts => fetchRecordsSql.ts} | 22 +++++----- ...nitialBlockLogsDozer.ts => getSnapshot.ts} | 44 +++++++++---------- packages/store-sync/src/dozer/index.ts | 4 +- packages/store-sync/src/dozer/selectFrom.ts | 4 +- 6 files changed, 43 insertions(+), 43 deletions(-) rename packages/store-sync/src/dozer/{fetchRecordsDozerSql.test.ts => fetchRecordsSql.test.ts} (97%) rename packages/store-sync/src/dozer/{fetchRecordsDozerSql.ts => fetchRecordsSql.ts} (86%) rename packages/store-sync/src/dozer/{fetchInitialBlockLogsDozer.ts => getSnapshot.ts} (67%) diff --git a/packages/store-sync/src/dozer/common.ts b/packages/store-sync/src/dozer/common.ts index d12b65bba5..8c9b863382 100644 --- a/packages/store-sync/src/dozer/common.ts +++ b/packages/store-sync/src/dozer/common.ts @@ -1,7 +1,7 @@ import { Table } from "@latticexyz/config"; import { Hex } from "viem"; -export type DozerTableQuery = { +export type TableQuery = { table: Table; /** * SQL to filter the records of this table. 
@@ -12,7 +12,7 @@ export type DozerTableQuery = { sql: string; }; -export type DozerLogFilter = { +export type LogFilter = { /** * Filter logs by the table ID. */ @@ -27,4 +27,4 @@ export type DozerLogFilter = { key1?: Hex; }; -export type DozerSyncFilter = DozerTableQuery | DozerLogFilter; +export type SyncFilter = TableQuery | LogFilter; diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts b/packages/store-sync/src/dozer/fetchRecordsSql.test.ts similarity index 97% rename from packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts rename to packages/store-sync/src/dozer/fetchRecordsSql.test.ts index f4fc8cb4f5..960ff199d7 100644 --- a/packages/store-sync/src/dozer/fetchRecordsDozerSql.test.ts +++ b/packages/store-sync/src/dozer/fetchRecordsSql.test.ts @@ -1,12 +1,12 @@ import { describe, expect, it } from "vitest"; -import { fetchRecordsDozerSql } from "./fetchRecordsDozerSql"; +import { fetchRecordsSql } from "./fetchRecordsSql"; import mudConfig from "@latticexyz/world/mud.config"; import { selectFrom } from "./selectFrom"; -describe("fetch dozer sql", () => { +describe("fetchRecordsSql", () => { // TODO: set up CI test case for this (requires setting up dozer in CI) it.skip("should fetch dozer sql", async () => { - const result = await fetchRecordsDozerSql({ + const result = await fetchRecordsSql({ dozerUrl: "https://redstone2.dozer.skystrife.xyz/q", storeAddress: "0x9d05cc196c87104a7196fcca41280729b505dbbf", queries: [ diff --git a/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts b/packages/store-sync/src/dozer/fetchRecordsSql.ts similarity index 86% rename from packages/store-sync/src/dozer/fetchRecordsDozerSql.ts rename to packages/store-sync/src/dozer/fetchRecordsSql.ts index 63bcd82748..a1d3164eb4 100644 --- a/packages/store-sync/src/dozer/fetchRecordsDozerSql.ts +++ b/packages/store-sync/src/dozer/fetchRecordsSql.ts @@ -1,6 +1,6 @@ import { DecodeDozerRecordsResult, DozerQueryResult, decodeDozerRecords } from 
"./decodeDozerRecords"; import { Hex } from "viem"; -import { DozerTableQuery } from "./common"; +import { TableQuery } from "./common"; import { Table } from "@latticexyz/config"; type DozerResponseSuccess = { @@ -12,13 +12,17 @@ type DozerResponseFail = { msg: string }; type DozerResponse = DozerResponseSuccess | DozerResponseFail; -type FetchDozerSqlArgs = { +function isDozerResponseFail(response: DozerResponse): response is DozerResponseFail { + return "msg" in response; +} + +type FetchRecordsSqlArgs = { dozerUrl: string; storeAddress: Hex; - queries: DozerTableQuery[]; + queries: TableQuery[]; }; -type FetchDozerSqlResult = +type FetchRecordsSqlResult = | { blockHeight: bigint; result: { @@ -28,15 +32,11 @@ type FetchDozerSqlResult = } | undefined; -function isDozerResponseFail(response: DozerResponse): response is DozerResponseFail { - return "msg" in response; -} - -export async function fetchRecordsDozerSql({ +export async function fetchRecordsSql({ dozerUrl, queries, storeAddress, -}: FetchDozerSqlArgs): Promise { +}: FetchRecordsSqlArgs): Promise { const response: DozerResponse = await ( await fetch(dozerUrl, { method: "POST", @@ -57,7 +57,7 @@ export async function fetchRecordsDozerSql({ return; } - const result: FetchDozerSqlResult = { + const result: FetchRecordsSqlResult = { blockHeight: BigInt(response.block_height), result: response.result.map((records, index) => ({ table: queries[index].table, diff --git a/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts b/packages/store-sync/src/dozer/getSnapshot.ts similarity index 67% rename from packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts rename to packages/store-sync/src/dozer/getSnapshot.ts index c0b7337700..aff2eb0747 100644 --- a/packages/store-sync/src/dozer/fetchInitialBlockLogsDozer.ts +++ b/packages/store-sync/src/dozer/getSnapshot.ts @@ -1,30 +1,30 @@ -import { DozerLogFilter, DozerSyncFilter, DozerTableQuery } from "./common"; +import { LogFilter, SyncFilter, TableQuery } 
from "./common"; import { Hex } from "viem"; -import { StorageAdapterBlock, SyncFilter } from "../common"; -import { fetchRecordsDozerSql } from "./fetchRecordsDozerSql"; +import { StorageAdapterBlock, SyncFilter as LegacyLogFilter } from "../common"; +import { fetchRecordsSql } from "./fetchRecordsSql"; import { recordToLog } from "../recordToLog"; -import { getSnapshot } from "../getSnapshot"; +import { getSnapshot as getSnapshotLogs } from "../getSnapshot"; import { bigIntMin, isDefined } from "@latticexyz/common/utils"; -export type FetchInitialBlockLogsDozerArgs = { +export type GetSnapshotArgs = { dozerUrl: string; storeAddress: Hex; - filters?: DozerSyncFilter[]; + filters?: SyncFilter[]; startBlock?: bigint; chainId: number; }; -export type FetchInitialBlockLogsDozerResult = { +export type GetSnapshotResult = { initialBlockLogs: StorageAdapterBlock; }; -export async function fetchInitialBlockLogsDozer({ +export async function getSnapshot({ dozerUrl, storeAddress, filters, startBlock = 0n, chainId, -}: FetchInitialBlockLogsDozerArgs): Promise { +}: GetSnapshotArgs): Promise { const initialBlockLogs: StorageAdapterBlock = { blockNumber: startBlock, logs: [] }; // We execute the list of provided SQL queries for hydration. For performance @@ -36,13 +36,13 @@ export async function fetchInitialBlockLogsDozer({ // partial updates), so we only notify consumers of state updates after the // initial hydration is complete. - const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as DozerTableQuery[]) : []; + const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : []; // Execute individual SQL queries as separate requests to parallelize on the backend. // Each individual request is expected to be executed against the same db state so it // can't be parallelized. 
const dozerTables = ( - await Promise.all(sqlFilters.map((filter) => fetchRecordsDozerSql({ dozerUrl, storeAddress, queries: [filter] }))) + await Promise.all(sqlFilters.map((filter) => fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] }))) ).filter(isDefined); if (dozerTables.length > 0) { @@ -54,30 +54,30 @@ export async function fetchInitialBlockLogsDozer({ } // Fetch the tables without SQL filter from the snapshot logs API for better performance. - const snapshotFilters = + const logsFilters = filters && filters .filter((filter) => !("sql" in filter)) .map((filter) => { - const { table, key0, key1 } = filter as DozerLogFilter; - return { tableId: table.tableId, key0, key1 } as SyncFilter; + const { table, key0, key1 } = filter as LogFilter; + return { tableId: table.tableId, key0, key1 } as LegacyLogFilter; }); - const snapshot = + const logs = // If no filters are provided, the entire state is fetched - !snapshotFilters || snapshotFilters.length > 0 - ? await getSnapshot({ + !logsFilters || logsFilters.length > 0 + ? await getSnapshotLogs({ chainId, address: storeAddress, - filters: snapshotFilters, + filters: logsFilters, indexerUrl: dozerUrl, }) : undefined; - // The block number passed in the overall result will be the min of all queries and the snapshot. - if (snapshot) { - initialBlockLogs.blockNumber = bigIntMin(initialBlockLogs.blockNumber, snapshot.blockNumber); - initialBlockLogs.logs = [...initialBlockLogs.logs, ...snapshot.logs]; + // The block number passed in the overall result will be the min of all queries and the logs. 
+ if (logs) { + initialBlockLogs.blockNumber = bigIntMin(initialBlockLogs.blockNumber, logs.blockNumber); + initialBlockLogs.logs = [...initialBlockLogs.logs, ...logs.logs]; } return { initialBlockLogs }; diff --git a/packages/store-sync/src/dozer/index.ts b/packages/store-sync/src/dozer/index.ts index 844a4308ff..57db326f28 100644 --- a/packages/store-sync/src/dozer/index.ts +++ b/packages/store-sync/src/dozer/index.ts @@ -1,4 +1,4 @@ export * from "./common"; -export * from "./fetchRecordsDozerSql"; +export * from "./fetchRecordsSql"; export * from "./selectFrom"; -export * from "./fetchInitialBlockLogsDozer"; +export * from "./getSnapshot"; diff --git a/packages/store-sync/src/dozer/selectFrom.ts b/packages/store-sync/src/dozer/selectFrom.ts index 81b1d757e7..628febff4d 100644 --- a/packages/store-sync/src/dozer/selectFrom.ts +++ b/packages/store-sync/src/dozer/selectFrom.ts @@ -1,12 +1,12 @@ import { Table } from "@latticexyz/config"; -import { DozerTableQuery } from "./common"; +import { TableQuery } from "./common"; // For autocompletion but still allowing all SQL strings export type Where
= `"${keyof table["schema"] & string}"` | (string & {}); export type SelectFromArgs
= { table: table; where?: Where
; limit?: number }; -export function selectFrom
({ table, where, limit }: SelectFromArgs
): DozerTableQuery { +export function selectFrom
({ table, where, limit }: SelectFromArgs
): TableQuery { const dozerTableLabel = table.namespace === "" ? table.name : `${table.namespace}__${table.name}`; return { table: table, From 175fe7c2a64e54cb308ea9efa4e6006d80ff49df Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 20:08:56 +0100 Subject: [PATCH 10/28] fix type error --- packages/protocol-parser/src/getKey.ts | 4 +++- packages/protocol-parser/src/getValue.ts | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/protocol-parser/src/getKey.ts b/packages/protocol-parser/src/getKey.ts index b633363810..539367f981 100644 --- a/packages/protocol-parser/src/getKey.ts +++ b/packages/protocol-parser/src/getKey.ts @@ -1,6 +1,8 @@ +import { Table } from "@latticexyz/config"; import { getKeySchema } from "./getKeySchema"; import { getSchemaPrimitives } from "./getSchemaPrimitives"; -import { PartialTable } from "./common"; + +type PartialTable = Pick; export function getKey
( table: table, diff --git a/packages/protocol-parser/src/getValue.ts b/packages/protocol-parser/src/getValue.ts index f4591ad6a1..73bf65f2a5 100644 --- a/packages/protocol-parser/src/getValue.ts +++ b/packages/protocol-parser/src/getValue.ts @@ -1,7 +1,9 @@ -import { PartialTable } from "./common"; +import { Table } from "@latticexyz/config"; import { getValueSchema } from "./getValueSchema"; import { getSchemaPrimitives } from "./getSchemaPrimitives"; +type PartialTable = Pick; + export function getValue
( table: table, record: getSchemaPrimitives, From a677f325b027a76e6a05f9c5950d86d3aae53654 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 20:27:06 +0100 Subject: [PATCH 11/28] missed one PartialTable --- packages/store-sync/src/recordToLog.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/store-sync/src/recordToLog.ts b/packages/store-sync/src/recordToLog.ts index 55f027a6b1..f912004388 100644 --- a/packages/store-sync/src/recordToLog.ts +++ b/packages/store-sync/src/recordToLog.ts @@ -7,12 +7,13 @@ import { getValueSchema, getKey, getValue, - PartialTable, } from "@latticexyz/protocol-parser/internal"; import { StorageAdapterLog } from "./common"; import { Table } from "@latticexyz/config"; import { Hex } from "viem"; +type PartialTable = Pick; + type RecordToLogArgs
= { address: Hex; table: table; From 9c398d852185d46db6194c3c27708cde09209e03 Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 1 Aug 2024 21:38:02 +0100 Subject: [PATCH 12/28] update logic in dozer/getSnapshot --- packages/store-sync/src/dozer/getSnapshot.ts | 80 ++++++++++---------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/packages/store-sync/src/dozer/getSnapshot.ts b/packages/store-sync/src/dozer/getSnapshot.ts index aff2eb0747..56a710a45f 100644 --- a/packages/store-sync/src/dozer/getSnapshot.ts +++ b/packages/store-sync/src/dozer/getSnapshot.ts @@ -25,8 +25,6 @@ export async function getSnapshot({ startBlock = 0n, chainId, }: GetSnapshotArgs): Promise { - const initialBlockLogs: StorageAdapterBlock = { blockNumber: startBlock, logs: [] }; - // We execute the list of provided SQL queries for hydration. For performance // reasons the queries are not executed against a fixed block height, but against // the latest state. We therefore pass the min block number of all query results @@ -38,47 +36,53 @@ export async function getSnapshot({ const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : []; - // Execute individual SQL queries as separate requests to parallelize on the backend. - // Each individual request is expected to be executed against the same db state so it - // can't be parallelized. - const dozerTables = ( - await Promise.all(sqlFilters.map((filter) => fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] }))) - ).filter(isDefined); + const fetchLogs = async (): Promise => { + // Fetch the tables without SQL filter from the snapshot logs API for better performance. 
+ const logsFilters = + filters && + filters + .filter((filter) => !("sql" in filter)) + .map((filter) => { + const { table, key0, key1 } = filter as LogFilter; + return { tableId: table.tableId, key0, key1 } as LegacyLogFilter; + }); - if (dozerTables.length > 0) { - // Use the minimum block number of all query results as the block number to start syncing from. - initialBlockLogs.blockNumber = bigIntMin(...dozerTables.map((result) => result.blockHeight)); - initialBlockLogs.logs = dozerTables.flatMap(({ result: [{ table, records }] }) => - records.map((record) => recordToLog({ table, record, address: storeAddress })), - ); - } + if (logsFilters && logsFilters.length === 0) { + return undefined; + } - // Fetch the tables without SQL filter from the snapshot logs API for better performance. - const logsFilters = - filters && - filters - .filter((filter) => !("sql" in filter)) - .map((filter) => { - const { table, key0, key1 } = filter as LogFilter; - return { tableId: table.tableId, key0, key1 } as LegacyLogFilter; - }); + return getSnapshotLogs({ + chainId, + address: storeAddress, + filters: logsFilters, + indexerUrl: dozerUrl, + }); + }; - const logs = - // If no filters are provided, the entire state is fetched - !logsFilters || logsFilters.length > 0 - ? await getSnapshotLogs({ - chainId, - address: storeAddress, - filters: logsFilters, - indexerUrl: dozerUrl, - }) - : undefined; + const fetchSql = (): Promise[] => { + return sqlFilters.map(async (filter) => { + const result = await fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] }); + return ( + result && { + blockNumber: result.blockHeight, + logs: result.result.flatMap(({ table, records }) => + records.map((record) => recordToLog({ table, record, address: storeAddress })), + ), + } + ); + }); + }; + + // Execute individual SQL queries as separate requests to parallelize on the backend. 
+ // Each individual request is expected to be executed against the same db state so it + // can't be parallelized. + const results = (await Promise.all([fetchLogs(), ...fetchSql()])).filter(isDefined); // The block number passed in the overall result will be the min of all queries and the logs. - if (logs) { - initialBlockLogs.blockNumber = bigIntMin(initialBlockLogs.blockNumber, logs.blockNumber); - initialBlockLogs.logs = [...initialBlockLogs.logs, ...logs.logs]; - } + const initialBlockLogs = { + blockNumber: results.length > 0 ? bigIntMin(...results.map((result) => result.blockNumber)) : startBlock, + logs: results.flatMap((result) => result.logs), + }; return { initialBlockLogs }; } From f22747ac9be59916fdf15ccbcfc75f7fe8764dda Mon Sep 17 00:00:00 2001 From: Andy Cernera Date: Mon, 5 Aug 2024 13:06:22 +0900 Subject: [PATCH 13/28] chore: include main branch when fetching during pre-release action --- .github/workflows/prerelease.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index ed8d21c625..b0fe5ac340 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -26,6 +26,8 @@ jobs: uses: actions/checkout@v3 with: submodules: recursive + fetch-depth: 0 + ref: main - name: "Setup" uses: ./.github/actions/setup From 0af11faeb89b06c53d7b274aed6045da7798e8d7 Mon Sep 17 00:00:00 2001 From: Ori Pomerantz Date: Thu, 22 Aug 2024 11:43:06 -0500 Subject: [PATCH 14/28] docs(state-sync/dozer): first version (#3033) Co-authored-by: alvrs Co-authored-by: Andy Cernera --- docs/pages/state-query/_meta.js | 1 + docs/pages/state-query/dozer.mdx | 707 +++++++++++++++++++++++++++++++ 2 files changed, 708 insertions(+) create mode 100644 docs/pages/state-query/dozer.mdx diff --git a/docs/pages/state-query/_meta.js b/docs/pages/state-query/_meta.js index 2d943c4e61..9993b11cd5 100644 --- a/docs/pages/state-query/_meta.js +++ b/docs/pages/state-query/_meta.js @@ -1,3 +1,4 @@ 
export default {
+  dozer: "Dozer",
   typescript: "TypeScript",
 };
diff --git a/docs/pages/state-query/dozer.mdx b/docs/pages/state-query/dozer.mdx
new file mode 100644
index 0000000000..325f1705bc
--- /dev/null
+++ b/docs/pages/state-query/dozer.mdx
@@ -0,0 +1,707 @@
+import { CollapseCode } from "../../components/CollapseCode";
+import { Callout } from "nextra/components";
+
+# Dozer
+
+If there is a dozer instance serving a blockchain, as there is for [Redstone](https://redstone.xyz/) and [Garnet](https://garnetchain.com/docs/what-is-redstone), you can use it to run queries on the data of any `World` on that blockchain.
+
+The query language is a subset of the SQL `SELECT` command.
+
+## Dozer URLs
+
+- [Redstone](https://redstone.xyz/) dozer - `https://dozer.mud.redstonechain.com/q`
+- [Garnet](https://garnetchain.com/) dozer - `https://dozer.mud.garnetchain.com/q`
+
+## Example `World`
+
+On Garnet there is a `World` at address [`0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e`](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e) that runs a slightly modified version of [the React template](https://github.com/latticexyz/mud/tree/main/templates/react).
+You can see the data schema for the `World` [in the block explorer](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e?tab=mud).
+
+## Curl queries
+
+You can run dozer queries by communicating directly with the server's API, for example using [curl](https://curl.se/).
+
+### Simple query
+
+This query looks for some fields from a single table.
+
+1. Create a file, `query.json`, with this content.
+
+   ```json filename="query.json" copy
+   [
+     {
+       "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+       "query": "SELECT id, description FROM app__Tasks"
+     }
+   ]
+   ```
+
+   <Callout type="info" emoji="ℹ️">
+
+   Dozer does not support `SELECT * FROM <table>
`, you have to specify column names.
+
+   </Callout>
+
+1. Run this command. Install `curl` and `jq` first if necessary.
+
+   ```sh copy
+   curl https://dozer.mud.garnetchain.com/q --compressed \
+     -H 'Accept-Encoding: gzip' \
+     -H 'Content-Type: application/json' \
+     -d @query.json | jq
+   ```
+
+The output is a mapping with two fields, the block height for which the result is valid, and the result itself.
+The result is a list of query responses, here it contains just one item because we only submitted a single query.
+The query response is also a list.
+The first entry is the field names, and all the other entries are rows returned by `SELECT`.
+
+```
+{
+  "block_height": 5699682,
+  "result": [
+    [
+      [
+        "id",
+        "description"
+      ],
+      [
+        "0x3100000000000000000000000000000000000000000000000000000000000000",
+        "Walk the dog"
+      ],
+      [
+        "0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf",
+        "Test"
+      ]
+    ]
+  ]
+}
+```
+
+Here we only care about the first result, so from now on we use this command line to tell `jq` to only show us that information.
+
+```sh copy
+curl https://dozer.mud.garnetchain.com/q --compressed \
+  -H 'Accept-Encoding: gzip' \
+  -H 'Content-Type: application/json' \
+  -d @query.json | jq '.result[0]'
+```
+
+### Conditions
+
+If we want to see only those tasks that haven't been completed we can use a `WHERE` clause.
+
+```json filename="query.json" copy
+[
+  {
+    "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e",
+    "query": "SELECT id, description FROM app__Tasks WHERE completedAt=0"
+  }
+]
+```
+
+ +Results + +```json +[ + ["id", "description"], + ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"], + ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"] +] +``` + +
+ +### Limited results + +If you only want to see a few results, you can use a `LIMIT` clause. + +```json filename="query.json" copy +[ + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT id, description FROM app__Tasks LIMIT 2" + } +] +``` + +
+ +Results + +```json +[ + ["id", "description"], + ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"], + ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"] +] +``` + +
+ +You can use `OFFSET` to get a paging effect. +For example, if you use this `query.json` you get two results, and the last row of the first one is repeated as the first row of the second one. + +```json filename="query.json" copy +[ + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT id, description FROM app__Tasks LIMIT 3" + }, + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT id, description FROM app__Tasks LIMIT 3 OFFSET 2" + } +] +``` + +
+ +Results + +Use this command to see the results of both queries. + +```sh copy +curl https://dozer.mud.garnetchain.com/q --compressed \ + -H 'Accept-Encoding: gzip' \ + -H 'Content-Type: application/json' -d @query.json \ + | jq '.result' +``` + +The result is: + +```json +[ + [ + ["id", "description"], + ["0x3100000000000000000000000000000000000000000000000000000000000000", "Walk the dog"], + ["0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", "Test"], + ["0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95", "Do the dishes"] + ], + [ + ["id", "description"], + ["0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95", "Do the dishes"], + ["0xb81d5036d0b62e0f2536635cbd5d7cec1d1f0706c0c6c1a9fa74293d7b0888eb", "Take out the trash"] + ] +] +``` + +
+ +### Sorted results + +If you want to control the order in which you get results, you can use an `ORDER BY` clause. + +```json filename="query.json" copy +[ + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT description, createdAt FROM app__Tasks ORDER BY createdAt" + } +] +``` + +Note that the sort field(s) need to be part of the selected columns. + +
+ +Results + +```json +[ + ["description", "createdat"], + ["Walk the dog", "1723495628"], + ["Take out the trash", "1723495640"], + ["Do the dishes", "1723495642"], + ["Test", "1723495964"], + ["Test from a different account", "1723576522"], + ["Another test", "1723576522"], + ["Yet another test", "1723646440"] +] +``` + +
+ +### Multiple tables + +You can join multiple tables, using the same syntax SQL uses. + +```json filename="query.json" copy +[ + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT app__Creator.id, description, taskCreator FROM app__Tasks, app__Creator WHERE app__Creator.id=app__Tasks.id" + } +] +``` + +
+ +Results + +```json +[ + ["id", "description", "taskcreator"], + [ + "0x3e0a112aadc5e02927fb4a91649bea565fd1baa1175aae4cb4957d6348f165cf", + "Test", + "0x735b2f2c662ebedffa94027a7196f0559f7f18a4" + ], + [ + "0x727d7bfe00b6db638c69595059dc10e21c52a7912d090905a7c7dc8659efd3b8", + "Test from a different account", + "0x428b1853e5ec29d35c84a218ec5170efc7621b58" + ], + [ + "0xb15fd0e41ab0bb6eb992e0a3d4f30fce6ee24a5fc9c30f725fdfc96d9d16ed95", + "Do the dishes", + "0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48" + ], + [ + "0xb81d5036d0b62e0f2536635cbd5d7cec1d1f0706c0c6c1a9fa74293d7b0888eb", + "Take out the trash", + "0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48" + ], + [ + "0xd43394ecf79077f65cd83b534dd44d3b4e9e2aa553e95aafecd14b8529543cda", + "Another test", + "0x428b1853e5ec29d35c84a218ec5170efc7621b58" + ] +] +``` + +
+ +### Grouping results + +You can use `GROUP BY` to identify different groups. +For example, this query gets you the different task creators. + +```json filename="query.json" copy +[ + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": "SELECT taskCreator FROM app__Creator GROUP BY taskCreator" + } +] +``` + +
+ +Results + +```json +[ + ["taskcreator"], + ["0x428b1853e5ec29d35c84a218ec5170efc7621b58"], + ["0x735b2f2c662ebedffa94027a7196f0559f7f18a4"], + ["0x8225d72f2c39f3729d7f3fc03c6aa8731eaeef48"] +] +``` + +
+ +## Typescript queries + +You can query dozer from [Typescript](https://www.typescriptlang.org/) without using MUD client synchronization. + +1. Create the project (in an empty directory) and install the software. + + ```sh copy + pnpm create ts-node + pnpm install + ``` + +1. Add the package that includes the dozer library. + + ```sh copy + pnpm install @latticexyz/store-sync @latticexyz/store + ``` + +1. Replace `src/main.ts` with this file. + + ```ts filename="main.ts" + import { fetchRecordsSql, selectFrom } from "@latticexyz/store-sync/dozer"; + import { defineStore } from "@latticexyz/store"; + + const config = defineStore({ + namespace: "app", + tables: { + Tasks: { + schema: { + id: "bytes32", + createdAt: "uint256", + completedAt: "uint256", + description: "string", + }, + key: ["id"], + }, + Creator: { + schema: { + id: "bytes32", + taskCreator: "address", + }, + key: ["id"], + }, + }, + }); + + const queryUncompleted = selectFrom({ + table: config.tables.app__Tasks, + where: "completedAt = 0", + limit: 2, + }); + + const queryResult = await fetchRecordsSql({ + dozerUrl: "https://dozer.mud.garnetchain.com/q", + storeAddress: "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + queries: [queryUncompleted], + }); + + console.log("\n\nTwo uncompleted tasks"); + console.log(`SQL: ${queryUncompleted.sql}\nResult:`); + console.log(queryResult.result[0].records); + ``` + +1. Compile and execute the application. + + ```sh copy + pnpm build && pnpm start + ``` + +
+ +Explanation + +```ts +import { fetchRecordsSql, selectFrom } from "@latticexyz/store-sync/dozer"; +import { defineStore } from "@latticexyz/store"; +``` + +Import the necessary definitions. + +```typescript +const config = defineStore({ + namespace: "app", + tables: { + ... + }, +}) +``` + +Create the table configuration. +The input to `defineStore` is the same as used in the [the `mud.config.ts` file](/config). + +```typescript +const queryUncompleted = selectFrom({ + table: config.tables.app__Tasks, + where: "completedAt = 0", + limit: 2, +}); +``` + +Create a query using [`selectFrom`](https://github.com/latticexyz/mud/blob/main/packages/store-sync/src/dozer/selectFrom.ts). +The queries supported by `selectFrom` are a subset of those dozer supports. +The results come from a single table, and only `WHERE` and `LIMIT` clauses are supported. + +```typescript +const queryResult = await fetchRecordsSql({ + dozerUrl: "https://dozer.mud.garnetchain.com/q", + storeAddress: "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + queries: [queryUncompleted], +}); +``` + +Run the query. + +```typescript +console.log("\n\nTwo uncompleted tasks"); +console.log(`SQL: ${queryUncompleted.sql}\nResult:`); +``` + +The SQL query that generated the resulting records. + +```typescript +console.log(queryResult.result[0].records); +``` + +The actual records. + +
+ +## Dozer with MUD synchronization + +Of course, you can also add dozer to a MUD client with synchronization. + +### Create a client to access the `World` + +These are the steps to create a client that can access the `World`. + +1. Create and run a react template application. + + ```sh copy + pnpm create mud@latest tasks --template react + cd tasks + pnpm dev + ``` + +1. [Browse to the application](http://localhost:3000/?chainId=17069&worldAddress=0x95f5d049b014114e2feeb5d8d994358ce4ffd06e). + The URL specifies the `chainId` and `worldAddress` for the `World`. + +1. In MUD DevTools see your account address and [fund it on Garnet](https://garnetchain.com/faucet). + You may need to get test ETH for your own address, and then transfer it to the account address the application uses. + +1. You can now create, complete, and delete tasks. + +1. To see the content of the `app__Creator` table, edit `packages/contracts/mud.config.ts` to add the `Creator` table definition. + + + + ```typescript filename="mud.config.ts" copy showLineNumbers {15-21} + import { defineWorld } from "@latticexyz/world"; + + export default defineWorld({ + namespace: "app", + tables: { + Tasks: { + schema: { + id: "bytes32", + createdAt: "uint256", + completedAt: "uint256", + description: "string", + }, + key: ["id"], + }, + Creator: { + schema: { + id: "bytes32", + taskCreator: "address", + }, + key: ["id"], + }, + }, + }); + ``` + + + +### Updating the client to use dozer + +The main purpose of dozer is to allow MUD clients to specify the subset of table records that a client needs, instead of synchronizing whole tables. + +To update the client, you change `packages/client/src/mud/setupNetwork.ts` to: + + + +```typescript filename="setupNetwork.ts" copy showLineNumbers {17-21, 84-101, 111-112} +/* + * The MUD client code is built on top of viem + * (https://viem.sh/docs/getting-started.html). + * This line imports the functions we need from it. 
+ */ +import { + createPublicClient, + fallback, + webSocket, + http, + createWalletClient, + Hex, + ClientConfig, + getContract, +} from "viem"; + +import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer"; + +import { syncToZustand } from "@latticexyz/store-sync/zustand"; +import { getNetworkConfig } from "./getNetworkConfig"; +import IWorldAbi from "contracts/out/IWorld.sol/IWorld.abi.json"; +import { createBurnerAccount, transportObserver, ContractWrite } from "@latticexyz/common"; +import { transactionQueue, writeObserver } from "@latticexyz/common/actions"; +import { Subject, share } from "rxjs"; + +/* + * Import our MUD config, which includes strong types for + * our tables and other config options. We use this to generate + * things like RECS components and get back strong types for them. + * + * See https://mud.dev/templates/typescript/contracts#mudconfigts + * for the source of this information. + */ +import mudConfig from "contracts/mud.config"; + +export type SetupNetworkResult = Awaited>; + +export async function setupNetwork() { + const networkConfig = await getNetworkConfig(); + + /* + * Create a viem public (read only) client + * (https://viem.sh/docs/clients/public.html) + */ + const clientOptions = { + chain: networkConfig.chain, + transport: transportObserver(fallback([webSocket(), http()])), + pollingInterval: 1000, + } as const satisfies ClientConfig; + + const publicClient = createPublicClient(clientOptions); + + /* + * Create an observable for contract writes that we can + * pass into MUD dev tools for transaction observability. + */ + const write$ = new Subject(); + + /* + * Create a temporary wallet and a viem client for it + * (see https://viem.sh/docs/clients/wallet.html). 
+ */ + const burnerAccount = createBurnerAccount(networkConfig.privateKey as Hex); + const burnerWalletClient = createWalletClient({ + ...clientOptions, + account: burnerAccount, + }) + .extend(transactionQueue()) + .extend(writeObserver({ onWrite: (write) => write$.next(write) })); + + /* + * Create an object for communicating with the deployed World. + */ + const worldContract = getContract({ + address: networkConfig.worldAddress as Hex, + abi: IWorldAbi, + client: { public: publicClient, wallet: burnerWalletClient }, + }); + + const dozerUrl = "https://dozer.mud.garnetchain.com/q"; + const yesterday = Date.now() / 1000 - 24 * 60 * 60; + const filters: DozerSyncFilter[] = [ + selectFrom({ + table: mudConfig.tables.app__Tasks, + where: `"createdAt" > ${yesterday}`, + }), + { table: mudConfig.tables.app__Creator }, + ]; + const { initialBlockLogs } = await getSnapshot({ + dozerUrl, + storeAddress: networkConfig.worldAddress as Hex, + filters, + chainId: networkConfig.chainId, + }); + const liveSyncFilters = filters.map((filter) => ({ + tableId: filter.table.tableId, + })); + + /* + * Sync on-chain state into RECS and keeps our client in sync. + * Uses the MUD indexer if available, otherwise falls back + * to the viem publicClient to make RPC calls to fetch MUD + * events from the chain. + */ + const { tables, useStore, latestBlock$, storedBlockLogs$, waitForTransaction } = await syncToZustand({ + initialBlockLogs, + filters: liveSyncFilters, + config: mudConfig, + address: networkConfig.worldAddress as Hex, + publicClient, + startBlock: BigInt(networkConfig.initialBlockNumber), + }); + + return { + tables, + useStore, + publicClient, + walletClient: burnerWalletClient, + latestBlock$, + storedBlockLogs$, + waitForTransaction, + worldContract, + write$: write$.asObservable().pipe(share()), + }; +} +``` + + + +
+
+<details>
+
+<summary>Explanation</summary>
+
+```typescript
+import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer";
+```
+
+Import the dozer definitions we need.
+
+```typescript
+const dozerUrl = "https://dozer.mud.garnetchain.com/q";
+```
+
+The URL for the dozer service.
+This is simplified testing code, on a production system this will probably be a lookup table based on the chainId.
+
+```typescript
+const yesterday = Date.now() / 1000 - 24 * 60 * 60;
+```
+
+In JavaScript (and therefore TypeScript), time is stored as milliseconds since [the beginning of the epoch](https://en.wikipedia.org/wiki/Unix_time).
+In UNIX, and therefore in Ethereum, time is stored as seconds since that same point.
+This is the timestamp 24 hours ago.
+
+```typescript
+  const filters: DozerSyncFilter[] = [
+```
+
+We create the filters for the tables we're interested in.
+
+```typescript
+    selectFrom({
+      table: mudConfig.tables.app__Tasks,
+      where: `"createdAt" > ${yesterday}`,
+    }),
+```
+
+From the `app__Tasks` table we only want entries created in the last 24 hours.
+To verify that the filter works as expected you can later change the code to only look for entries older than 24 hours.
+
+```typescript
+    { table: mudConfig.tables.app__Creator },
+  ];
+```
+
+We also want the `app__Creator` table.
+
+```typescript
+const { initialBlockLogs } = await getSnapshot({
+  dozerUrl,
+  storeAddress: networkConfig.worldAddress as Hex,
+  filters,
+  chainId: networkConfig.chainId,
+});
+```
+
+Get the initial snapshot to hydrate (fill with initial information) the data store.
+Note that this snapshot does not have the actual data, but the events that created it.
+
+```typescript
+const liveSyncFilters = filters.map((filter) => ({
+  tableId: filter.table.tableId,
+}));
+```
+
+The synchronization filters are a lot more limited.
+[You can read the description of these filters here](/guides/hello-world/filter-sync#filtering).
+
+```typescript
+  const { ... 
} = await syncToZustand({ + initialBlockLogs, + filters: liveSyncFilters, + ... + }); +``` + +Finally, we provide `initialBlockLogs` for the hydration and `filters` for the updates to the synchronization function (either `syncToRecs` or `syncToZustand`). + +
From 25a2a776dc92be862dfaaff7a0e1455945a5af5f Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 20:34:03 +0000 Subject: [PATCH 15/28] remove dozer from file and function name --- packages/store-sync/src/dozer/decodeDozerField.test.ts | 9 --------- packages/store-sync/src/dozer/decodeField.test.ts | 9 +++++++++ .../src/dozer/{decodeDozerField.ts => decodeField.ts} | 4 ++-- ...{decodeDozerRecords.test.ts => decodeRecords.test.ts} | 4 ++-- .../dozer/{decodeDozerRecords.ts => decodeRecords.ts} | 9 +++------ packages/store-sync/src/dozer/fetchRecordsSql.ts | 4 ++-- 6 files changed, 18 insertions(+), 21 deletions(-) delete mode 100644 packages/store-sync/src/dozer/decodeDozerField.test.ts create mode 100644 packages/store-sync/src/dozer/decodeField.test.ts rename packages/store-sync/src/dozer/{decodeDozerField.ts => decodeField.ts} (76%) rename packages/store-sync/src/dozer/{decodeDozerRecords.test.ts => decodeRecords.test.ts} (88%) rename packages/store-sync/src/dozer/{decodeDozerRecords.ts => decodeRecords.ts} (81%) diff --git a/packages/store-sync/src/dozer/decodeDozerField.test.ts b/packages/store-sync/src/dozer/decodeDozerField.test.ts deleted file mode 100644 index 6fb7cec67c..0000000000 --- a/packages/store-sync/src/dozer/decodeDozerField.test.ts +++ /dev/null @@ -1,9 +0,0 @@ -import { describe, expect, it } from "vitest"; -import { decodeDozerField } from "./decodeDozerField"; - -describe("decodeDozerField", () => { - it("should decode numbers to the expected value type", () => { - expect(decodeDozerField("uint48", "1")).toBe(1); - expect(decodeDozerField("uint56", "1")).toBe(1n); - }); -}); diff --git a/packages/store-sync/src/dozer/decodeField.test.ts b/packages/store-sync/src/dozer/decodeField.test.ts new file mode 100644 index 0000000000..b056daef40 --- /dev/null +++ b/packages/store-sync/src/dozer/decodeField.test.ts @@ -0,0 +1,9 @@ +import { describe, expect, it } from "vitest"; +import { decodeField } from "./decodeField"; + 
+describe("decodeDozerField", () => { + it("should decode numbers to the expected value type", () => { + expect(decodeField("uint48", "1")).toBe(1); + expect(decodeField("uint56", "1")).toBe(1n); + }); +}); diff --git a/packages/store-sync/src/dozer/decodeDozerField.ts b/packages/store-sync/src/dozer/decodeField.ts similarity index 76% rename from packages/store-sync/src/dozer/decodeDozerField.ts rename to packages/store-sync/src/dozer/decodeField.ts index 0e499ae9bc..d79c5c6586 100644 --- a/packages/store-sync/src/dozer/decodeDozerField.ts +++ b/packages/store-sync/src/dozer/decodeField.ts @@ -6,13 +6,13 @@ import { schemaAbiTypeToDefaultValue, } from "@latticexyz/schema-type/internal"; -export function decodeDozerField( +export function decodeField( abiType: abiType, data: string | boolean | string[], ): SchemaAbiTypeToPrimitiveType { const defaultValueType = typeof schemaAbiTypeToDefaultValue[abiType]; if (Array.isArray(data)) { - return data.map((element) => decodeDozerField(arrayToStaticAbiType(abiType as ArrayAbiType), element)) as never; + return data.map((element) => decodeField(arrayToStaticAbiType(abiType as ArrayAbiType), element)) as never; } if (defaultValueType === "number") { return Number(data) as never; diff --git a/packages/store-sync/src/dozer/decodeDozerRecords.test.ts b/packages/store-sync/src/dozer/decodeRecords.test.ts similarity index 88% rename from packages/store-sync/src/dozer/decodeDozerRecords.test.ts rename to packages/store-sync/src/dozer/decodeRecords.test.ts index 614722a46d..fae7e6488b 100644 --- a/packages/store-sync/src/dozer/decodeDozerRecords.test.ts +++ b/packages/store-sync/src/dozer/decodeRecords.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it } from "vitest"; -import { decodeDozerRecords } from "./decodeDozerRecords"; +import { decodeRecords } from "./decodeRecords"; describe("decodeDozerRecord", () => { const schema = { @@ -34,7 +34,7 @@ describe("decodeDozerRecord", () => { uint32Arr: [1234, 5678], }; - const 
decoded = decodeDozerRecords({ schema, records: [dozerHeader, dozerRecord] }); + const decoded = decodeRecords({ schema, records: [dozerHeader, dozerRecord] }); expect(decoded).toStrictEqual([decodedRecord]); }); }); diff --git a/packages/store-sync/src/dozer/decodeDozerRecords.ts b/packages/store-sync/src/dozer/decodeRecords.ts similarity index 81% rename from packages/store-sync/src/dozer/decodeDozerRecords.ts rename to packages/store-sync/src/dozer/decodeRecords.ts index 6a86e9339d..ef1120035b 100644 --- a/packages/store-sync/src/dozer/decodeDozerRecords.ts +++ b/packages/store-sync/src/dozer/decodeRecords.ts @@ -1,6 +1,6 @@ import { Schema } from "@latticexyz/config"; import { getSchemaPrimitives } from "@latticexyz/protocol-parser/internal"; -import { decodeDozerField } from "./decodeDozerField"; +import { decodeField } from "./decodeField"; type DozerQueryHeader = string[]; type DozerQueryRecord = (string | boolean | string[])[]; @@ -22,7 +22,7 @@ function trimHeader(result: DozerQueryResult): DozerQueryRecord[] { export type DecodeDozerRecordsResult = getSchemaPrimitives[]; -export function decodeDozerRecords({ +export function decodeRecords({ schema, records, }: DecodeDozerRecordsArgs): DecodeDozerRecordsResult { @@ -35,10 +35,7 @@ export function decodeDozerRecords({ return trimHeader(records).map((record) => Object.fromEntries( - Object.keys(schema).map((fieldName, index) => [ - fieldName, - decodeDozerField(schema[fieldName].type, record[index]), - ]), + Object.keys(schema).map((fieldName, index) => [fieldName, decodeField(schema[fieldName].type, record[index])]), ), ) as never; } diff --git a/packages/store-sync/src/dozer/fetchRecordsSql.ts b/packages/store-sync/src/dozer/fetchRecordsSql.ts index a1d3164eb4..e60636ea4d 100644 --- a/packages/store-sync/src/dozer/fetchRecordsSql.ts +++ b/packages/store-sync/src/dozer/fetchRecordsSql.ts @@ -1,4 +1,4 @@ -import { DecodeDozerRecordsResult, DozerQueryResult, decodeDozerRecords } from "./decodeDozerRecords"; 
+import { DecodeDozerRecordsResult, DozerQueryResult, decodeRecords } from "./decodeRecords"; import { Hex } from "viem"; import { TableQuery } from "./common"; import { Table } from "@latticexyz/config"; @@ -61,7 +61,7 @@ export async function fetchRecordsSql({ blockHeight: BigInt(response.block_height), result: response.result.map((records, index) => ({ table: queries[index].table, - records: decodeDozerRecords({ schema: queries[index].table.schema, records }), + records: decodeRecords({ schema: queries[index].table.schema, records }), })), }; From 6cb9b3fee11f0f629c2349fc3348c0f3976abd3a Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:00:04 +0000 Subject: [PATCH 16/28] rename fetchRecordsSql to fetchRecords --- packages/store-sync/src/dozer/decodeField.test.ts | 2 +- packages/store-sync/src/dozer/decodeRecords.test.ts | 2 +- .../dozer/{fetchRecordsSql.test.ts => fetchRecords.test.ts} | 6 +++--- .../src/dozer/{fetchRecordsSql.ts => fetchRecords.ts} | 2 +- packages/store-sync/src/dozer/getSnapshot.ts | 4 ++-- packages/store-sync/src/dozer/index.ts | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) rename packages/store-sync/src/dozer/{fetchRecordsSql.test.ts => fetchRecords.test.ts} (97%) rename packages/store-sync/src/dozer/{fetchRecordsSql.ts => fetchRecords.ts} (97%) diff --git a/packages/store-sync/src/dozer/decodeField.test.ts b/packages/store-sync/src/dozer/decodeField.test.ts index b056daef40..aa56131aee 100644 --- a/packages/store-sync/src/dozer/decodeField.test.ts +++ b/packages/store-sync/src/dozer/decodeField.test.ts @@ -1,7 +1,7 @@ import { describe, expect, it } from "vitest"; import { decodeField } from "./decodeField"; -describe("decodeDozerField", () => { +describe("decodeField", () => { it("should decode numbers to the expected value type", () => { expect(decodeField("uint48", "1")).toBe(1); expect(decodeField("uint56", "1")).toBe(1n); diff --git a/packages/store-sync/src/dozer/decodeRecords.test.ts 
b/packages/store-sync/src/dozer/decodeRecords.test.ts index fae7e6488b..b59ddd5b65 100644 --- a/packages/store-sync/src/dozer/decodeRecords.test.ts +++ b/packages/store-sync/src/dozer/decodeRecords.test.ts @@ -1,7 +1,7 @@ import { describe, expect, it } from "vitest"; import { decodeRecords } from "./decodeRecords"; -describe("decodeDozerRecord", () => { +describe("decodeRecord", () => { const schema = { address: { type: "address", internalType: "address" }, uint256: { type: "uint256", internalType: "uint256" }, diff --git a/packages/store-sync/src/dozer/fetchRecordsSql.test.ts b/packages/store-sync/src/dozer/fetchRecords.test.ts similarity index 97% rename from packages/store-sync/src/dozer/fetchRecordsSql.test.ts rename to packages/store-sync/src/dozer/fetchRecords.test.ts index 960ff199d7..888fff6cce 100644 --- a/packages/store-sync/src/dozer/fetchRecordsSql.test.ts +++ b/packages/store-sync/src/dozer/fetchRecords.test.ts @@ -1,12 +1,12 @@ import { describe, expect, it } from "vitest"; -import { fetchRecordsSql } from "./fetchRecordsSql"; +import { fetchRecords } from "./fetchRecords"; import mudConfig from "@latticexyz/world/mud.config"; import { selectFrom } from "./selectFrom"; -describe("fetchRecordsSql", () => { +describe("fetchRecords", () => { // TODO: set up CI test case for this (requires setting up dozer in CI) it.skip("should fetch dozer sql", async () => { - const result = await fetchRecordsSql({ + const result = await fetchRecords({ dozerUrl: "https://redstone2.dozer.skystrife.xyz/q", storeAddress: "0x9d05cc196c87104a7196fcca41280729b505dbbf", queries: [ diff --git a/packages/store-sync/src/dozer/fetchRecordsSql.ts b/packages/store-sync/src/dozer/fetchRecords.ts similarity index 97% rename from packages/store-sync/src/dozer/fetchRecordsSql.ts rename to packages/store-sync/src/dozer/fetchRecords.ts index e60636ea4d..b224f0cb09 100644 --- a/packages/store-sync/src/dozer/fetchRecordsSql.ts +++ b/packages/store-sync/src/dozer/fetchRecords.ts @@ -32,7 
+32,7 @@ type FetchRecordsSqlResult = } | undefined; -export async function fetchRecordsSql({ +export async function fetchRecords({ dozerUrl, queries, storeAddress, diff --git a/packages/store-sync/src/dozer/getSnapshot.ts b/packages/store-sync/src/dozer/getSnapshot.ts index 56a710a45f..087cf57b11 100644 --- a/packages/store-sync/src/dozer/getSnapshot.ts +++ b/packages/store-sync/src/dozer/getSnapshot.ts @@ -1,7 +1,7 @@ import { LogFilter, SyncFilter, TableQuery } from "./common"; import { Hex } from "viem"; import { StorageAdapterBlock, SyncFilter as LegacyLogFilter } from "../common"; -import { fetchRecordsSql } from "./fetchRecordsSql"; +import { fetchRecords } from "./fetchRecords"; import { recordToLog } from "../recordToLog"; import { getSnapshot as getSnapshotLogs } from "../getSnapshot"; import { bigIntMin, isDefined } from "@latticexyz/common/utils"; @@ -61,7 +61,7 @@ export async function getSnapshot({ const fetchSql = (): Promise[] => { return sqlFilters.map(async (filter) => { - const result = await fetchRecordsSql({ dozerUrl, storeAddress, queries: [filter] }); + const result = await fetchRecords({ dozerUrl, storeAddress, queries: [filter] }); return ( result && { blockNumber: result.blockHeight, diff --git a/packages/store-sync/src/dozer/index.ts b/packages/store-sync/src/dozer/index.ts index 57db326f28..aeeec5224e 100644 --- a/packages/store-sync/src/dozer/index.ts +++ b/packages/store-sync/src/dozer/index.ts @@ -1,4 +1,4 @@ export * from "./common"; -export * from "./fetchRecordsSql"; +export * from "./fetchRecords"; export * from "./selectFrom"; export * from "./getSnapshot"; From f2fbaadca1eb77484c17e13567e21966eb983396 Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:04:32 +0000 Subject: [PATCH 17/28] use dozer base url as input to fetchRecords --- packages/store-sync/src/dozer/fetchRecords.ts | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/packages/store-sync/src/dozer/fetchRecords.ts 
b/packages/store-sync/src/dozer/fetchRecords.ts index b224f0cb09..eda012d0f8 100644 --- a/packages/store-sync/src/dozer/fetchRecords.ts +++ b/packages/store-sync/src/dozer/fetchRecords.ts @@ -16,13 +16,13 @@ function isDozerResponseFail(response: DozerResponse): response is DozerResponse return "msg" in response; } -type FetchRecordsSqlArgs = { +type FetchRecordsArgs = { dozerUrl: string; storeAddress: Hex; queries: TableQuery[]; }; -type FetchRecordsSqlResult = +type FetchRecordsResult = | { blockHeight: bigint; result: { @@ -32,13 +32,9 @@ type FetchRecordsSqlResult = } | undefined; -export async function fetchRecords({ - dozerUrl, - queries, - storeAddress, -}: FetchRecordsSqlArgs): Promise { +export async function fetchRecords({ dozerUrl, queries, storeAddress }: FetchRecordsArgs): Promise { const response: DozerResponse = await ( - await fetch(dozerUrl, { + await fetch(new URL("/q", dozerUrl), { method: "POST", headers: { "Content-Type": "application/json", @@ -57,7 +53,7 @@ export async function fetchRecords({ return; } - const result: FetchRecordsSqlResult = { + const result: FetchRecordsResult = { blockHeight: BigInt(response.block_height), result: response.result.map((records, index) => ({ table: queries[index].table, From c92258e187740df56bdb6b856098af44a82eba2f Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:18:39 +0000 Subject: [PATCH 18/28] move error handling into getSnapshot --- packages/store-sync/src/dozer/fetchRecords.ts | 19 ++-- packages/store-sync/src/dozer/getSnapshot.ts | 100 +++++++++--------- 2 files changed, 59 insertions(+), 60 deletions(-) diff --git a/packages/store-sync/src/dozer/fetchRecords.ts b/packages/store-sync/src/dozer/fetchRecords.ts index eda012d0f8..480dccbab8 100644 --- a/packages/store-sync/src/dozer/fetchRecords.ts +++ b/packages/store-sync/src/dozer/fetchRecords.ts @@ -22,15 +22,13 @@ type FetchRecordsArgs = { queries: TableQuery[]; }; -type FetchRecordsResult = - | { - blockHeight: bigint; - result: { - 
table: Table; - records: DecodeDozerRecordsResult; - }[]; - } - | undefined; +type FetchRecordsResult = { + blockHeight: bigint; + result: { + table: Table; + records: DecodeDozerRecordsResult; + }[]; +}; export async function fetchRecords({ dozerUrl, queries, storeAddress }: FetchRecordsArgs): Promise { const response: DozerResponse = await ( @@ -44,13 +42,12 @@ export async function fetchRecords({ dozerUrl, queries, storeAddress }: FetchRec ).json(); if (isDozerResponseFail(response)) { - console.warn(`Dozer response: ${response.msg}\n\nTry reproducing via cURL: + throw new Error(`Dozer response: ${response.msg}\n\nTry reproducing via cURL: curl ${dozerUrl} \\ --compressed \\ -H 'Accept-Encoding: gzip' \\ -H 'Content-Type: application/json' \\ -d '[${queries.map((query) => `{"address": "${storeAddress}", "query": "${query.sql.replaceAll('"', '\\"')}"}`).join(",")}]'`); - return; } const result: FetchRecordsResult = { diff --git a/packages/store-sync/src/dozer/getSnapshot.ts b/packages/store-sync/src/dozer/getSnapshot.ts index 087cf57b11..03751a5099 100644 --- a/packages/store-sync/src/dozer/getSnapshot.ts +++ b/packages/store-sync/src/dozer/getSnapshot.ts @@ -25,64 +25,66 @@ export async function getSnapshot({ startBlock = 0n, chainId, }: GetSnapshotArgs): Promise { - // We execute the list of provided SQL queries for hydration. For performance - // reasons the queries are not executed against a fixed block height, but against - // the latest state. We therefore pass the min block number of all query results - // as overall block number. This means some logs will be re-fetched again during - // the hydration process, but after the hydration is complete, the state will be - // correct. Intermediate state updates during hydration might be incorrect (for - // partial updates), so we only notify consumers of state updates after the - // initial hydration is complete. + try { + // We execute the list of provided SQL queries for hydration. 
For performance + // reasons the queries are not executed against a fixed block height, but against + // the latest state. We therefore pass the min block number of all query results + // as overall block number. This means some logs will be re-fetched again during + // the hydration process, but after the hydration is complete, the state will be + // correct. Intermediate state updates during hydration might be incorrect (for + // partial updates), so we only notify consumers of state updates after the + // initial hydration is complete. - const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : []; + const sqlFilters = filters ? (filters.filter((filter) => "sql" in filter) as TableQuery[]) : []; - const fetchLogs = async (): Promise => { - // Fetch the tables without SQL filter from the snapshot logs API for better performance. - const logsFilters = - filters && - filters - .filter((filter) => !("sql" in filter)) - .map((filter) => { - const { table, key0, key1 } = filter as LogFilter; - return { tableId: table.tableId, key0, key1 } as LegacyLogFilter; - }); + const fetchLogs = async (): Promise => { + // Fetch the tables without SQL filter from the snapshot logs API for better performance. 
+ const logsFilters = + filters && + filters + .filter((filter) => !("sql" in filter)) + .map((filter) => { + const { table, key0, key1 } = filter as LogFilter; + return { tableId: table.tableId, key0, key1 } as LegacyLogFilter; + }); - if (logsFilters && logsFilters.length === 0) { - return undefined; - } + if (logsFilters && logsFilters.length === 0) { + return undefined; + } - return getSnapshotLogs({ - chainId, - address: storeAddress, - filters: logsFilters, - indexerUrl: dozerUrl, - }); - }; + return getSnapshotLogs({ + chainId, + address: storeAddress, + filters: logsFilters, + indexerUrl: dozerUrl, + }); + }; - const fetchSql = (): Promise[] => { - return sqlFilters.map(async (filter) => { - const result = await fetchRecords({ dozerUrl, storeAddress, queries: [filter] }); - return ( - result && { + const fetchSql = (): Promise[] => { + return sqlFilters.map(async (filter) => { + const result = await fetchRecords({ dozerUrl, storeAddress, queries: [filter] }); + return { blockNumber: result.blockHeight, logs: result.result.flatMap(({ table, records }) => records.map((record) => recordToLog({ table, record, address: storeAddress })), ), - } - ); - }); - }; + }; + }); + }; - // Execute individual SQL queries as separate requests to parallelize on the backend. - // Each individual request is expected to be executed against the same db state so it - // can't be parallelized. - const results = (await Promise.all([fetchLogs(), ...fetchSql()])).filter(isDefined); + // Execute individual SQL queries as separate requests to parallelize on the backend. + // Each individual request is expected to be executed against the same db state so it + // can't be parallelized. + const results = (await Promise.all([fetchLogs(), ...fetchSql()])).filter(isDefined); + // The block number passed in the overall result will be the min of all queries and the logs. + const initialBlockLogs = { + blockNumber: results.length > 0 ? 
bigIntMin(...results.map((result) => result.blockNumber)) : startBlock, + logs: results.flatMap((result) => result.logs), + }; - // The block number passed in the overall result will be the min of all queries and the logs. - const initialBlockLogs = { - blockNumber: results.length > 0 ? bigIntMin(...results.map((result) => result.blockNumber)) : startBlock, - logs: results.flatMap((result) => result.logs), - }; - - return { initialBlockLogs }; + return { initialBlockLogs }; + } catch (e) { + console.warn(`Failed to load snapshot. ${e}`); + return { initialBlockLogs: { blockNumber: startBlock - 1n, logs: [] } }; + } } From 494b0bb643e4579501d9e87e1dfaca53baec2875 Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:24:11 +0000 Subject: [PATCH 19/28] refactors --- packages/store-sync/src/dozer/fetchRecords.ts | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/store-sync/src/dozer/fetchRecords.ts b/packages/store-sync/src/dozer/fetchRecords.ts index 480dccbab8..a7239697c3 100644 --- a/packages/store-sync/src/dozer/fetchRecords.ts +++ b/packages/store-sync/src/dozer/fetchRecords.ts @@ -31,15 +31,15 @@ type FetchRecordsResult = { }; export async function fetchRecords({ dozerUrl, queries, storeAddress }: FetchRecordsArgs): Promise { - const response: DozerResponse = await ( - await fetch(new URL("/q", dozerUrl), { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify(queries.map((query) => ({ address: storeAddress, query: query.sql }))), - }) - ).json(); + const query = JSON.stringify(queries.map((query) => ({ address: storeAddress, query: query.sql }))); + + const response: DozerResponse = await fetch(dozerUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: query, + }).then((res) => res.json()); if (isDozerResponseFail(response)) { throw new Error(`Dozer response: ${response.msg}\n\nTry reproducing via cURL: @@ -47,7 +47,7 @@ export 
async function fetchRecords({ dozerUrl, queries, storeAddress }: FetchRec --compressed \\ -H 'Accept-Encoding: gzip' \\ -H 'Content-Type: application/json' \\ - -d '[${queries.map((query) => `{"address": "${storeAddress}", "query": "${query.sql.replaceAll('"', '\\"')}"}`).join(",")}]'`); + -d '${query.replaceAll("'", "\\'")}'`); } const result: FetchRecordsResult = { From 11c5cdd9f281e9cf4f4c85b4fc516b7284ed2f4b Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:32:59 +0000 Subject: [PATCH 20/28] stylistic changes --- packages/store-sync/src/dozer/getSnapshot.ts | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/packages/store-sync/src/dozer/getSnapshot.ts b/packages/store-sync/src/dozer/getSnapshot.ts index 03751a5099..3c2e26370f 100644 --- a/packages/store-sync/src/dozer/getSnapshot.ts +++ b/packages/store-sync/src/dozer/getSnapshot.ts @@ -60,22 +60,20 @@ export async function getSnapshot({ }); }; - const fetchSql = (): Promise[] => { - return sqlFilters.map(async (filter) => { - const result = await fetchRecords({ dozerUrl, storeAddress, queries: [filter] }); - return { - blockNumber: result.blockHeight, - logs: result.result.flatMap(({ table, records }) => - records.map((record) => recordToLog({ table, record, address: storeAddress })), - ), - }; - }); + const fetchSql = async (query: TableQuery): Promise => { + const result = await fetchRecords({ dozerUrl, storeAddress, queries: [query] }); + return { + blockNumber: result.blockHeight, + logs: result.result.flatMap(({ table, records }) => + records.map((record) => recordToLog({ table, record, address: storeAddress })), + ), + }; }; // Execute individual SQL queries as separate requests to parallelize on the backend. // Each individual request is expected to be executed against the same db state so it // can't be parallelized. 
- const results = (await Promise.all([fetchLogs(), ...fetchSql()])).filter(isDefined); + const results = (await Promise.all([fetchLogs(), ...sqlFilters.map(fetchSql)])).filter(isDefined); // The block number passed in the overall result will be the min of all queries and the logs. const initialBlockLogs = { blockNumber: results.length > 0 ? bigIntMin(...results.map((result) => result.blockNumber)) : startBlock, From 9adc159b769bc05b34a786876ffcc3a48b475dcc Mon Sep 17 00:00:00 2001 From: alvrs Date: Tue, 3 Sep 2024 21:38:04 +0000 Subject: [PATCH 21/28] remove unrelated change --- .github/workflows/prerelease.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index b0fe5ac340..ed8d21c625 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -26,8 +26,6 @@ jobs: uses: actions/checkout@v3 with: submodules: recursive - fetch-depth: 0 - ref: main - name: "Setup" uses: ./.github/actions/setup From 403179f6c0f90899736120a0a9c76fec1f7276dd Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 5 Sep 2024 13:20:40 +0000 Subject: [PATCH 22/28] review fixes --- packages/protocol-parser/src/getKeySchema.ts | 2 +- packages/store-sync/src/dozer/decodeRecords.ts | 6 +++--- packages/store-sync/src/dozer/fetchRecords.ts | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/protocol-parser/src/getKeySchema.ts b/packages/protocol-parser/src/getKeySchema.ts index 9023a14aa6..857ba2e04f 100644 --- a/packages/protocol-parser/src/getKeySchema.ts +++ b/packages/protocol-parser/src/getKeySchema.ts @@ -2,7 +2,7 @@ import { StaticAbiType, Table } from "@latticexyz/config"; type PartialTable = Pick; -type KeySchema = { +export type KeySchema = { readonly [fieldName: string]: { /** the Solidity primitive ABI type */ readonly type: StaticAbiType; diff --git a/packages/store-sync/src/dozer/decodeRecords.ts b/packages/store-sync/src/dozer/decodeRecords.ts index 
ef1120035b..17c829ddb0 100644 --- a/packages/store-sync/src/dozer/decodeRecords.ts +++ b/packages/store-sync/src/dozer/decodeRecords.ts @@ -8,7 +8,7 @@ type DozerQueryRecord = (string | boolean | string[])[]; // First item in the result is the header export type DozerQueryResult = [DozerQueryHeader, ...DozerQueryRecord[]]; -export type DecodeDozerRecordsArgs = { +export type DecodeRecordsArgs = { schema: Schema; records: DozerQueryResult; }; @@ -20,12 +20,12 @@ function trimHeader(result: DozerQueryResult): DozerQueryRecord[] { return result.slice(1); } -export type DecodeDozerRecordsResult = getSchemaPrimitives[]; +export type DecodeRecordsResult = getSchemaPrimitives[]; export function decodeRecords({ schema, records, -}: DecodeDozerRecordsArgs): DecodeDozerRecordsResult { +}: DecodeRecordsArgs): DecodeRecordsResult { const fieldNames = Object.keys(schema); if (records.length > 0 && fieldNames.length !== records[0].length) { throw new Error( diff --git a/packages/store-sync/src/dozer/fetchRecords.ts b/packages/store-sync/src/dozer/fetchRecords.ts index a7239697c3..caa049c8e5 100644 --- a/packages/store-sync/src/dozer/fetchRecords.ts +++ b/packages/store-sync/src/dozer/fetchRecords.ts @@ -1,4 +1,4 @@ -import { DecodeDozerRecordsResult, DozerQueryResult, decodeRecords } from "./decodeRecords"; +import { DecodeRecordsResult, DozerQueryResult, decodeRecords } from "./decodeRecords"; import { Hex } from "viem"; import { TableQuery } from "./common"; import { Table } from "@latticexyz/config"; @@ -26,7 +26,7 @@ type FetchRecordsResult = { blockHeight: bigint; result: { table: Table; - records: DecodeDozerRecordsResult; + records: DecodeRecordsResult; }[]; }; From 6488f2c1951d536b1f24e2a589d5bcc24565450f Mon Sep 17 00:00:00 2001 From: alvrs Date: Thu, 5 Sep 2024 13:24:52 +0000 Subject: [PATCH 23/28] fix export conflict --- packages/protocol-parser/src/getKeySchema.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/packages/protocol-parser/src/getKeySchema.ts b/packages/protocol-parser/src/getKeySchema.ts index 857ba2e04f..3d6ed45efe 100644 --- a/packages/protocol-parser/src/getKeySchema.ts +++ b/packages/protocol-parser/src/getKeySchema.ts @@ -2,7 +2,7 @@ import { StaticAbiType, Table } from "@latticexyz/config"; type PartialTable = Pick; -export type KeySchema = { +export type ResolvedKeySchema = { readonly [fieldName: string]: { /** the Solidity primitive ABI type */ readonly type: StaticAbiType; @@ -12,7 +12,7 @@ export type KeySchema = { }; export type getKeySchema
= PartialTable extends table - ? KeySchema + ? ResolvedKeySchema : { readonly [fieldName in Extract]: table["schema"][fieldName] & { type: StaticAbiType; From 1c7724003dc17245659a276d2c224a958ce9f142 Mon Sep 17 00:00:00 2001 From: Ori Pomerantz Date: Tue, 17 Sep 2024 00:40:10 -0500 Subject: [PATCH 24/28] =?UTF-8?q?docs(filter-sync=20and=20dozer):=20explai?= =?UTF-8?q?n=20that=20there=20are=20two=20sync=20types=20=F0=9F=9A=97=20?= =?UTF-8?q?=20(#3179)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/components/common-text/FilterTypes.mdx | 13 +++++++++++++ docs/pages/guides/hello-world/filter-sync.mdx | 15 +++++++++++++++ docs/pages/state-query/dozer.mdx | 15 +++++++++++---- 3 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 docs/components/common-text/FilterTypes.mdx diff --git a/docs/components/common-text/FilterTypes.mdx b/docs/components/common-text/FilterTypes.mdx new file mode 100644 index 0000000000..2b5a438901 --- /dev/null +++ b/docs/components/common-text/FilterTypes.mdx @@ -0,0 +1,13 @@ +import { Callout } from "nextra/components"; + + +MUD initial data hydration, and therefore filtering, comes in two flavors: [Dozer](/state-query/dozer) and [generic](/guides/hello-world/filter-sync). +Note that this is for the initial hydration, currently limits on on-going synchronization are limited to [the generic method](/guides/hello-world/filter-sync). 
+ +| | Dozer | Generic | +| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Filtering | Can filter on most SQL functions | Can only filter on tables and the first two key fields (limited by [`eth_getLogs`](https://ethereum.github.io/execution-apis/api-documentation/) filters) | +| Availability | [Redstone](https://redstone.xyz/docs/network-info), [Garnet](https://garnetchain.com/docs/network-info), or elsewhere if you run your own instance | Any EVM chain | +| Security assumptions | The Dozer instance returns accurate information | The endpoint returns accurate information (same assumption as any other blockchain app) | + + diff --git a/docs/pages/guides/hello-world/filter-sync.mdx b/docs/pages/guides/hello-world/filter-sync.mdx index 11cde7387f..e9078f93c9 100644 --- a/docs/pages/guides/hello-world/filter-sync.mdx +++ b/docs/pages/guides/hello-world/filter-sync.mdx @@ -1,10 +1,25 @@ import { CollapseCode } from "../../../components/CollapseCode"; +import FilterTypes from "../../../components/common-text/FilterTypes.mdx"; # Filter data synchronization In this tutorial you modify `networkSetup.ts` to filter the information you synchronize. Filtering information this way allows you to reduce the use of network resources and makes loading times faster. + + +
+ +Why are only the first two key fields available for filtering? + +Ethereum log entries can have [up to four indexed fields](https://www.evm.codes/?fork=cancun#a4). +However, Solidity only supports [three indexed fields](https://www.alchemy.com/overviews/solidity-events) because the first indexed field is used for the event name and type. +In MUD, [this field](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol) specifies whether [a new record is created](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L26-L32), a record is changed (either [static fields](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L43) or [dynamic fields](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L56-L64)), or [a record is deleted](https://github.com/latticexyz/mud/blob/main/packages/store/src/IStoreEvents.sol#L71). +The second indexed field is always the table's [resource ID](/world/resource-ids). +This leaves two fields for key fields. + 
+ ## Setup To see the effects of filtering we need a table with entries to filter. To get such a table: diff --git a/docs/pages/state-query/dozer.mdx b/docs/pages/state-query/dozer.mdx index 325f1705bc..395ee444e5 100644 --- a/docs/pages/state-query/dozer.mdx +++ b/docs/pages/state-query/dozer.mdx @@ -1,15 +1,22 @@ import { CollapseCode } from "../../components/CollapseCode"; +import FilterTypes from "../../components/common-text/FilterTypes.mdx"; import { Callout } from "nextra/components"; # Dozer -If there is a dozer instance serving a blockchain, as there is for [Redstone](https://redstone.xyz/) and [Garnet](https://garnetchain.com/docs/what-is-redstone), you can use it to run queries on the data of any `World` on that blockchain. + + +If there is a dozer instance serving a blockchain, as there is for [Redstone](https://redstone.xyz/) and [Garnet](https://garnetchain.com/docs/what-is-redstone), you can use it to: + +- Run queries on the data of any `World` on that blockchain. +- [Speed up the initial hydration](#dozer-with-mud-synchronization) by reducing the amount of data that needs to be synchronized. + This is important for the user experience, because until the initial hydration is done the client is typically unusable. The query language is a subset of [the SQL `SELECT` command](). ## Dozer URLs -- [Redstone](https://redstone.xyz/) dozer `https://dozer.mud.redstonechain.com/q` +- [Redstone](https://redstone.xyz/) dozer - `https://dozer.mud.redstonechain.com/q` - [Garnet](https://garnetchain.com/) dozer - `https://dozer.mud.garnetchain.com/q` ## Example `World` @@ -433,7 +440,7 @@ The actual records. ## Dozer with MUD synchronization -Of course, you can also add dozer to a MUD client with synchronization. +You can also add dozer to a MUD client to speed up the initial hydration. 
### Create a client to access the `World` @@ -495,7 +502,7 @@ To update the client, you change `packages/client/src/mud/setupNetwork.ts` to: -```typescript filename="setupNetwork.ts" copy showLineNumbers {17-21, 84-101, 111-112} +```typescript filename="setupNetwork.ts" copy showLineNumbers {17, 80-97, 106-107} /* * The MUD client code is built on top of viem * (https://viem.sh/docs/getting-started.html). From b819a2cb3ae27649f9ad7447e7d8b91c200d6795 Mon Sep 17 00:00:00 2001 From: Ori Pomerantz Date: Tue, 17 Sep 2024 13:42:05 -0500 Subject: [PATCH 25/28] =?UTF-8?q?docs(dozer):=20add=20metadata=20query=20?= =?UTF-8?q?=F0=9F=9A=97=20=20(#3186)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/pages/state-query/dozer.mdx | 191 +++++++++++++++++++++++++++++++ 1 file changed, 191 insertions(+) diff --git a/docs/pages/state-query/dozer.mdx b/docs/pages/state-query/dozer.mdx index 395ee444e5..ede697887f 100644 --- a/docs/pages/state-query/dozer.mdx +++ b/docs/pages/state-query/dozer.mdx @@ -310,6 +310,197 @@ For example, this query gets you the different task creators. +### Metadata + +You can use the `/tables` path to get the list of either all tables, or all tables that match a string. +As per the SQL standard, the wildcard is `%`. + +1. Create a file, `tables.json`. + + ```json filename="tables.json" + { + "address": "0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e", + "query": { + "name": "%" + } + } + ``` + +1. Run this command. + + ```sh copy + curl https://dozer.mud.garnetchain.com/tables --compressed \ + -H 'Accept-Encoding: gzip' \ + -H 'Content-Type: application/json' \ + -d @tables.json | jq + ``` + +
+ +Results + +```json +[ + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x746273746f72650000000000000000005461626c657300000000000000000000", + "key_names": ["tableId"], + "val_names": ["fieldLayout", "keySchema", "valueSchema", "abiEncodedKeyNames", "abiEncodedFieldNames"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x006003025f5f5fc4c40000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x746273746f72650000000000000000005265736f757263654964730000000000", + "key_names": ["resourceId"], + "val_names": ["exists"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x746273746f726500000000000000000053746f7265486f6f6b73000000000000", + "key_names": ["tableId"], + "val_names": ["hooks"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x00000001b6000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c640000000000000000004e616d6573706163654f776e65720000", + "key_names": ["namespaceId"], + "val_names": ["owner"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c6400000000000000000042616c616e6365730000000000000000", + "key_names": ["namespaceId"], + "val_names": ["balance"], + "key_schema": 
"0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x002001001f000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c64000000000000000000496e7374616c6c65644d6f64756c6573", + "key_names": ["moduleAddress", "argumentsHash"], + "val_names": ["isInstalled"], + "key_schema": "0x00340200615f0000000000000000000000000000000000000000000000000000", + "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c640000000000000000005573657244656c65676174696f6e436f", + "key_names": ["delegator", "delegatee"], + "val_names": ["delegationControlId"], + "key_schema": "0x0028020061610000000000000000000000000000000000000000000000000000", + "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c640000000000000000004e616d65737061636544656c65676174", + "key_names": ["namespaceId"], + "val_names": ["delegationControlId"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c640000000000000000005265736f757263654163636573730000", + "key_names": ["resourceId", "caller"], + "val_names": ["access"], + "key_schema": "0x003402005f610000000000000000000000000000000000000000000000000000", + "val_schema": "0x0001010060000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": 
"0x7462776f726c6400000000000000000053797374656d73000000000000000000", + "key_names": ["systemId"], + "val_names": ["system", "publicAccess"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x0015020061600000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c6400000000000000000046756e6374696f6e53656c6563746f72", + "key_names": ["worldFunctionSelector"], + "val_names": ["systemId", "systemFunctionSelector"], + "key_schema": "0x0004010043000000000000000000000000000000000000000000000000000000", + "val_schema": "0x002402005f430000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x6f74776f726c6400000000000000000046756e6374696f6e5369676e61747572", + "key_names": ["functionSelector"], + "val_names": ["functionSignature"], + "key_schema": "0x0004010043000000000000000000000000000000000000000000000000000000", + "val_schema": "0x00000001c5000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c6400000000000000000053797374656d486f6f6b730000000000", + "key_names": ["systemId"], + "val_names": ["value"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x00000001b6000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c6400000000000000000053797374656d52656769737472790000", + "key_names": ["system"], + "val_names": ["systemId"], + "key_schema": "0x0014010061000000000000000000000000000000000000000000000000000000", + "val_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "query_name": null + 
}, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462776f726c64000000000000000000496e69744d6f64756c65416464726573", + "key_names": [], + "val_names": ["value"], + "key_schema": "0x0000000000000000000000000000000000000000000000000000000000000000", + "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x7462617070000000000000000000000043726561746f72000000000000000000", + "key_names": ["id"], + "val_names": ["taskCreator"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x0014010061000000000000000000000000000000000000000000000000000000", + "query_name": null + }, + { + "address": "0x95f5d049b014114e2feeb5d8d994358ce4ffd06e", + "table_id": "0x746261707000000000000000000000005461736b730000000000000000000000", + "key_names": ["id"], + "val_names": ["createdAt", "completedAt", "description"], + "key_schema": "0x002001005f000000000000000000000000000000000000000000000000000000", + "val_schema": "0x004002011f1fc500000000000000000000000000000000000000000000000000", + "query_name": null + } +] +``` + +
+ +To interpret the results, [see the table documentation](/store/tables#advanced). + ## Typescript queries You can query dozer from [Typescript](https://www.typescriptlang.org/) without using MUD client synchronization. From 3a6d21e96b795127dfbe7e38477cff85dac61477 Mon Sep 17 00:00:00 2001 From: Ori Pomerantz Date: Tue, 17 Sep 2024 16:18:35 -0500 Subject: [PATCH 26/28] docs(guide/dozer): move hydration to its own guide --- docs/pages/guides/hello-world/_meta.js | 1 + docs/pages/guides/hello-world/dozer.mdx | 278 ++++++++++++++++++++++++ docs/pages/state-query/dozer.mdx | 275 ----------------------- 3 files changed, 279 insertions(+), 275 deletions(-) create mode 100644 docs/pages/guides/hello-world/dozer.mdx diff --git a/docs/pages/guides/hello-world/_meta.js b/docs/pages/guides/hello-world/_meta.js index 8c4b16f604..8ecd28fd3f 100644 --- a/docs/pages/guides/hello-world/_meta.js +++ b/docs/pages/guides/hello-world/_meta.js @@ -1,6 +1,7 @@ export default { "add-table": "Add a table", "filter-sync": "Filter data synchronization", + "dozer": "Add dozer hydration", "add-system": "Add a system", "deploy": { "title": "Deploy to a blockchain", diff --git a/docs/pages/guides/hello-world/dozer.mdx b/docs/pages/guides/hello-world/dozer.mdx new file mode 100644 index 0000000000..d88351b184 --- /dev/null +++ b/docs/pages/guides/hello-world/dozer.mdx @@ -0,0 +1,278 @@ +import { CollapseCode } from "../../../components/CollapseCode"; + +# Add dozer hydration + +In this tutorial you learn how to add dozer hydration to an existing MUD application, such as the ones created by the template. +To avoid running dozer locally, we use a `World` on Garnet at address [`0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e`](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e) that runs a slightly modified version of [the React template](https://github.com/latticexyz/mud/tree/main/templates/react). 
+You can see the data schema for the `World` [in the block explorer](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e?tab=mud). + +## Create a client to access the `World` + +These are the steps to create a client that can access the `World`. + +1. Create and run a react template application. + + ```sh copy + pnpm create mud@latest tasks --template react + cd tasks + pnpm dev + ``` + +1. [Browse to the application](http://localhost:3000/?chainId=17069&worldAddress=0x95f5d049b014114e2feeb5d8d994358ce4ffd06e). + The URL specifies the `chainId` and `worldAddress` for the `World`. + +1. In MUD DevTools see your account address and [fund it on Garnet](https://garnetchain.com/faucet). + You may need to get test ETH for your own address, and then transfer it to the account address the application uses. + +1. You can now create, complete, and delete tasks. + +1. To see the content of the `app__Creator` table, edit `packages/contracts/mud.config.ts` to add the `Creator` table definition. + + + + ```typescript filename="mud.config.ts" copy showLineNumbers {15-21} + import { defineWorld } from "@latticexyz/world"; + + export default defineWorld({ + namespace: "app", + tables: { + Tasks: { + schema: { + id: "bytes32", + createdAt: "uint256", + completedAt: "uint256", + description: "string", + }, + key: ["id"], + }, + Creator: { + schema: { + id: "bytes32", + taskCreator: "address", + }, + key: ["id"], + }, + }, + }); + ``` + + + +### Updating the client to use dozer + +The main purpose of dozer is to allow MUD clients to specify the subset of table records that a client needs, instead of synchronizing whole tables. + +To update the client, you change `packages/client/src/mud/setupNetwork.ts` to: + + + +```typescript filename="setupNetwork.ts" copy showLineNumbers {17, 80-97, 106-107} +/* + * The MUD client code is built on top of viem + * (https://viem.sh/docs/getting-started.html). + * This line imports the functions we need from it. 
+ */
+import {
+  createPublicClient,
+  fallback,
+  webSocket,
+  http,
+  createWalletClient,
+  Hex,
+  ClientConfig,
+  getContract,
+} from "viem";
+
+import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer";
+
+import { syncToZustand } from "@latticexyz/store-sync/zustand";
+import { getNetworkConfig } from "./getNetworkConfig";
+import IWorldAbi from "contracts/out/IWorld.sol/IWorld.abi.json";
+import { createBurnerAccount, transportObserver, ContractWrite } from "@latticexyz/common";
+import { transactionQueue, writeObserver } from "@latticexyz/common/actions";
+import { Subject, share } from "rxjs";
+
+/*
+ * Import our MUD config, which includes strong types for
+ * our tables and other config options. We use this to generate
+ * things like RECS components and get back strong types for them.
+ *
+ * See https://mud.dev/templates/typescript/contracts#mudconfigts
+ * for the source of this information.
+ */
+import mudConfig from "contracts/mud.config";
+
+export type SetupNetworkResult = Awaited<ReturnType<typeof setupNetwork>>;
+
+export async function setupNetwork() {
+  const networkConfig = await getNetworkConfig();
+
+  /*
+   * Create a viem public (read only) client
+   * (https://viem.sh/docs/clients/public.html)
+   */
+  const clientOptions = {
+    chain: networkConfig.chain,
+    transport: transportObserver(fallback([webSocket(), http()])),
+    pollingInterval: 1000,
+  } as const satisfies ClientConfig;
+
+  const publicClient = createPublicClient(clientOptions);
+
+  /*
+   * Create an observable for contract writes that we can
+   * pass into MUD dev tools for transaction observability.
+   */
+  const write$ = new Subject<ContractWrite>();
+
+  /*
+   * Create a temporary wallet and a viem client for it
+   * (see https://viem.sh/docs/clients/wallet.html).
+ */ + const burnerAccount = createBurnerAccount(networkConfig.privateKey as Hex); + const burnerWalletClient = createWalletClient({ + ...clientOptions, + account: burnerAccount, + }) + .extend(transactionQueue()) + .extend(writeObserver({ onWrite: (write) => write$.next(write) })); + + /* + * Create an object for communicating with the deployed World. + */ + const worldContract = getContract({ + address: networkConfig.worldAddress as Hex, + abi: IWorldAbi, + client: { public: publicClient, wallet: burnerWalletClient }, + }); + + const dozerUrl = "https://dozer.mud.garnetchain.com/q"; + const yesterday = Date.now() / 1000 - 24 * 60 * 60; + const filters: DozerSyncFilter[] = [ + selectFrom({ + table: mudConfig.tables.app__Tasks, + where: `"createdAt" > ${yesterday}`, + }), + { table: mudConfig.tables.app__Creator }, + ]; + const { initialBlockLogs } = await getSnapshot({ + dozerUrl, + storeAddress: networkConfig.worldAddress as Hex, + filters, + chainId: networkConfig.chainId, + }); + const liveSyncFilters = filters.map((filter) => ({ + tableId: filter.table.tableId, + })); + + /* + * Sync on-chain state into RECS and keeps our client in sync. + * Uses the MUD indexer if available, otherwise falls back + * to the viem publicClient to make RPC calls to fetch MUD + * events from the chain. + */ + const { tables, useStore, latestBlock$, storedBlockLogs$, waitForTransaction } = await syncToZustand({ + initialBlockLogs, + filters: liveSyncFilters, + config: mudConfig, + address: networkConfig.worldAddress as Hex, + publicClient, + startBlock: BigInt(networkConfig.initialBlockNumber), + }); + + return { + tables, + useStore, + publicClient, + walletClient: burnerWalletClient, + latestBlock$, + storedBlockLogs$, + waitForTransaction, + worldContract, + write$: write$.asObservable().pipe(share()), + }; +} +``` + + + +
+ +Explanation + +```typescript +import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer"; +``` + +Import the dozer definitions we need. + +```typescript +const dozerUrl = "https://dozer.mud.garnetchain.com/q"; +``` + +The URL for the dozer service. +This is simplified testing code, on a production system this will probably be a lookup table based on the chainId. + +```typescript +const yesterday = Date.now() / 1000 - 24 * 60 * 60; +``` + +In JavaScript (and therefore TypeScript), time is stored as milliseconds since [the beginning of the epoch](https://en.wikipedia.org/wiki/Unix_time). +In UNIX, and therefore in Ethereum, time is stored as seconds since that same point. +This is the timestamp 24 hours ago. + +```typescript + const filters: DozerSyncFilter[] = [ +``` + +We create the filters for the tables we're interested in. + +```typescript + selectFrom({ + table: mudConfig.tables.app__Tasks, + where: `"createdAt" > ${yesterday}`, + }), +``` + +From the `app__Tasks` table we only want entries created in the last 24 hours. +To verify that the filter works as expected you can later change the code to only look for entries older than 24 hours. + +```typescript + { table: mudConfig.tables.app__Creator }, + ]; +``` + +We also want the `app__Counter` table. + +```typescript +const { initialBlockLogs } = await getSnapshot({ + dozerUrl, + storeAddress: networkConfig.worldAddress as Hex, + filters, + chainId: networkConfig.chainId, +}); +``` + +Get the initial snapshot to hydrate (fill with initial information) the data store. +Note that this snapshot does not have the actual data, but the events that created it. + +```typescript +const liveSyncFilters = filters.map((filter) => ({ + tableId: filter.table.tableId, +})); +``` + +The synchronization filters are a lot more limited. +[You can read the description of these filters here](/guides/hello-world/filter-sync#filtering). + +```typescript + const { ... 
} = await syncToZustand({ + initialBlockLogs, + filters: liveSyncFilters, + ... + }); +``` + +Finally, we provide `initialBlockLogs` for the hydration and `filters` for the updates to the synchronization function (either `syncToRecs` or `syncToZustand`). + +
diff --git a/docs/pages/state-query/dozer.mdx b/docs/pages/state-query/dozer.mdx index ede697887f..d83b4df384 100644 --- a/docs/pages/state-query/dozer.mdx +++ b/docs/pages/state-query/dozer.mdx @@ -628,278 +628,3 @@ console.log(queryResult.result[0].records); The actual records. - -## Dozer with MUD synchronization - -You can also add dozer to a MUD client to speed up the initial hydration. - -### Create a client to access the `World` - -These are the steps to create a client that can access the `World`. - -1. Create and run a react template application. - - ```sh copy - pnpm create mud@latest tasks --template react - cd tasks - pnpm dev - ``` - -1. [Browse to the application](http://localhost:3000/?chainId=17069&worldAddress=0x95f5d049b014114e2feeb5d8d994358ce4ffd06e). - The URL specifies the `chainId` and `worldAddress` for the `World`. - -1. In MUD DevTools see your account address and [fund it on Garnet](https://garnetchain.com/faucet). - You may need to get test ETH for your own address, and then transfer it to the account address the application uses. - -1. You can now create, complete, and delete tasks. - -1. To see the content of the `app__Creator` table, edit `packages/contracts/mud.config.ts` to add the `Creator` table definition. - - - - ```typescript filename="mud.config.ts" copy showLineNumbers {15-21} - import { defineWorld } from "@latticexyz/world"; - - export default defineWorld({ - namespace: "app", - tables: { - Tasks: { - schema: { - id: "bytes32", - createdAt: "uint256", - completedAt: "uint256", - description: "string", - }, - key: ["id"], - }, - Creator: { - schema: { - id: "bytes32", - taskCreator: "address", - }, - key: ["id"], - }, - }, - }); - ``` - - - -### Updating the client to use dozer - -The main purpose of dozer is to allow MUD clients to specify the subset of table records that a client needs, instead of synchronizing whole tables. 
- -To update the client, you change `packages/client/src/mud/setupNetwork.ts` to: - - - -```typescript filename="setupNetwork.ts" copy showLineNumbers {17, 80-97, 106-107} -/* - * The MUD client code is built on top of viem - * (https://viem.sh/docs/getting-started.html). - * This line imports the functions we need from it. - */ -import { - createPublicClient, - fallback, - webSocket, - http, - createWalletClient, - Hex, - ClientConfig, - getContract, -} from "viem"; - -import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer"; - -import { syncToZustand } from "@latticexyz/store-sync/zustand"; -import { getNetworkConfig } from "./getNetworkConfig"; -import IWorldAbi from "contracts/out/IWorld.sol/IWorld.abi.json"; -import { createBurnerAccount, transportObserver, ContractWrite } from "@latticexyz/common"; -import { transactionQueue, writeObserver } from "@latticexyz/common/actions"; -import { Subject, share } from "rxjs"; - -/* - * Import our MUD config, which includes strong types for - * our tables and other config options. We use this to generate - * things like RECS components and get back strong types for them. - * - * See https://mud.dev/templates/typescript/contracts#mudconfigts - * for the source of this information. - */ -import mudConfig from "contracts/mud.config"; - -export type SetupNetworkResult = Awaited>; - -export async function setupNetwork() { - const networkConfig = await getNetworkConfig(); - - /* - * Create a viem public (read only) client - * (https://viem.sh/docs/clients/public.html) - */ - const clientOptions = { - chain: networkConfig.chain, - transport: transportObserver(fallback([webSocket(), http()])), - pollingInterval: 1000, - } as const satisfies ClientConfig; - - const publicClient = createPublicClient(clientOptions); - - /* - * Create an observable for contract writes that we can - * pass into MUD dev tools for transaction observability. 
- */ - const write$ = new Subject(); - - /* - * Create a temporary wallet and a viem client for it - * (see https://viem.sh/docs/clients/wallet.html). - */ - const burnerAccount = createBurnerAccount(networkConfig.privateKey as Hex); - const burnerWalletClient = createWalletClient({ - ...clientOptions, - account: burnerAccount, - }) - .extend(transactionQueue()) - .extend(writeObserver({ onWrite: (write) => write$.next(write) })); - - /* - * Create an object for communicating with the deployed World. - */ - const worldContract = getContract({ - address: networkConfig.worldAddress as Hex, - abi: IWorldAbi, - client: { public: publicClient, wallet: burnerWalletClient }, - }); - - const dozerUrl = "https://dozer.mud.garnetchain.com/q"; - const yesterday = Date.now() / 1000 - 24 * 60 * 60; - const filters: DozerSyncFilter[] = [ - selectFrom({ - table: mudConfig.tables.app__Tasks, - where: `"createdAt" > ${yesterday}`, - }), - { table: mudConfig.tables.app__Creator }, - ]; - const { initialBlockLogs } = await getSnapshot({ - dozerUrl, - storeAddress: networkConfig.worldAddress as Hex, - filters, - chainId: networkConfig.chainId, - }); - const liveSyncFilters = filters.map((filter) => ({ - tableId: filter.table.tableId, - })); - - /* - * Sync on-chain state into RECS and keeps our client in sync. - * Uses the MUD indexer if available, otherwise falls back - * to the viem publicClient to make RPC calls to fetch MUD - * events from the chain. - */ - const { tables, useStore, latestBlock$, storedBlockLogs$, waitForTransaction } = await syncToZustand({ - initialBlockLogs, - filters: liveSyncFilters, - config: mudConfig, - address: networkConfig.worldAddress as Hex, - publicClient, - startBlock: BigInt(networkConfig.initialBlockNumber), - }); - - return { - tables, - useStore, - publicClient, - walletClient: burnerWalletClient, - latestBlock$, - storedBlockLogs$, - waitForTransaction, - worldContract, - write$: write$.asObservable().pipe(share()), - }; -} -``` - - - -
- -Explanation - -```typescript -import { DozerSyncFilter, getSnapshot, selectFrom } from "@latticexyz/store-sync/dozer"; -``` - -Import the dozer definitions we need. - -```typescript -const dozerUrl = "https://dozer.mud.garnetchain.com/q"; -``` - -The URL for the dozer service. -This is simplified testing code, on a production system this will probably be a lookup table based on the chainId. - -```typescript -const yesterday = Date.now() / 1000 - 24 * 60 * 60; -``` - -In JavaScript (and therefore TypeScript), time is stored as milliseconds since [the beginning of the epoch](https://en.wikipedia.org/wiki/Unix_time). -In UNIX, and therefore in Ethereum, time is stored as seconds since that same point. -This is the timestamp 24 hours ago. - -```typescript - const filters: DozerSyncFilter[] = [ -``` - -We create the filters for the tables we're interested in. - -```typescript - selectFrom({ - table: mudConfig.tables.app__Tasks, - where: `"createdAt" > ${yesterday}`, - }), -``` - -From the `app__Tasks` table we only want entries created in the last 24 hours. -To verify that the filter works as expected you can later change the code to only look for entries older than 24 hours. - -```typescript - { table: mudConfig.tables.app__Creator }, - ]; -``` - -We also want the `app__Counter` table. - -```typescript -const { initialBlockLogs } = await getSnapshot({ - dozerUrl, - storeAddress: networkConfig.worldAddress as Hex, - filters, - chainId: networkConfig.chainId, -}); -``` - -Get the initial snapshot to hydrate (fill with initial information) the data store. -Note that this snapshot does not have the actual data, but the events that created it. - -```typescript -const liveSyncFilters = filters.map((filter) => ({ - tableId: filter.table.tableId, -})); -``` - -The synchronization filters are a lot more limited. -[You can read the description of these filters here](/guides/hello-world/filter-sync#filtering). - -```typescript - const { ... 
} = await syncToZustand({ - initialBlockLogs, - filters: liveSyncFilters, - ... - }); -``` - -Finally, we provide `initialBlockLogs` for the hydration and `filters` for the updates to the synchronization function (either `syncToRecs` or `syncToZustand`). - -
From 03b764103db94a474361235421ddb7ef45f6453b Mon Sep 17 00:00:00 2001
From: Ori Pomerantz 
Date: Tue, 17 Sep 2024 16:49:34 -0500
Subject: [PATCH 27/28] types of sync

---
 docs/pages/guides/hello-world/dozer.mdx | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/docs/pages/guides/hello-world/dozer.mdx b/docs/pages/guides/hello-world/dozer.mdx
index d88351b184..549c1af13c 100644
--- a/docs/pages/guides/hello-world/dozer.mdx
+++ b/docs/pages/guides/hello-world/dozer.mdx
@@ -225,7 +225,8 @@ This is the timestamp 24 hours ago.
   const filters: DozerSyncFilter[] = [
 ```
 
-We create the filters for the tables we're interested in.
+We create the dozer filters for the tables we're interested in.
+This is the _dozer_ filter, so it is only used for the initial hydration of the client.
 
 ```typescript
   selectFrom({
@@ -242,7 +243,7 @@ To verify that the filter works as expected you can later change the code to onl
   ];
 ```
 
-We also want the `app__Counter` table.
+We also want the entire `app__Creator` table.
 
 ```typescript
 const { initialBlockLogs } = await getSnapshot({
@@ -262,8 +263,8 @@ const liveSyncFilters = filters.map((filter) => ({
 }));
 ```
 
-The synchronization filters are a lot more limited.
-[You can read the description of these filters here](/guides/hello-world/filter-sync#filtering).
+The live synchronization filters are used after the initial hydration, and keep up with changes on the blockchain.
+These synchronization filters are a lot more limited, [you can read the description of these filters here](/guides/hello-world/filter-sync#filtering).
 
 ```typescript
   const { ...
} = await syncToZustand({ From 9056439306ba72922a899c6ab63d3381a24a7257 Mon Sep 17 00:00:00 2001 From: Ori Pomerantz Date: Tue, 17 Sep 2024 20:34:37 -0500 Subject: [PATCH 28/28] add callout for sync types --- docs/pages/guides/hello-world/dozer.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/pages/guides/hello-world/dozer.mdx b/docs/pages/guides/hello-world/dozer.mdx index 549c1af13c..0c8de04db8 100644 --- a/docs/pages/guides/hello-world/dozer.mdx +++ b/docs/pages/guides/hello-world/dozer.mdx @@ -1,7 +1,10 @@ import { CollapseCode } from "../../../components/CollapseCode"; +import FilterTypes from "../../../components/common-text/FilterTypes.mdx"; # Add dozer hydration + + In this tutorial you learn how to add dozer hydration to an existing MUD application, such as the ones created by the template. To avoid running dozer locally, we use a `World` on Garnet at address [`0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e`](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e) that runs a slightly modified version of [the React template](https://github.com/latticexyz/mud/tree/main/templates/react). You can see the data schema for the `World` [in the block explorer](https://explorer.garnetchain.com/address/0x95F5d049B014114E2fEeB5d8d994358Ce4FFd06e?tab=mud).