From 94d13385297c47ecb024b9f65290afe7a0bfaaaf Mon Sep 17 00:00:00 2001 From: William Harrison <87287585+wdhdev@users.noreply.github.com> Date: Sat, 9 Nov 2024 19:26:40 +0800 Subject: [PATCH] feat(ci): validation --- .github/CODEOWNERS | 6 +- .../{validate.yml => validation.yml} | 17 +- dnsconfig.js | 22 +- domains/sandbox.json | 14 +- package.json | 9 + tests/domains.test.js | 40 +++ tests/json.test.js | 107 +++++++ tests/records.test.js | 290 ++++++++++++++++++ utils/functions.js | 63 ++++ 9 files changed, 538 insertions(+), 30 deletions(-) rename .github/workflows/{validate.yml => validation.yml} (67%) create mode 100644 package.json create mode 100644 tests/domains.test.js create mode 100644 tests/json.test.js create mode 100644 tests/records.test.js create mode 100644 utils/functions.js diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a4788b0509..6b9d689e61 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,8 +1,4 @@ -* @phenax @wdhdev +* @wdhdev -/.github/ @wdhdev /domains/ @is-a-dev/maintainers - *.md @is-a-dev/maintainers -/LICENSE @phenax -/dnsconfig.js @wdhdev diff --git a/.github/workflows/validate.yml b/.github/workflows/validation.yml similarity index 67% rename from .github/workflows/validate.yml rename to .github/workflows/validation.yml index 0ca0890ec9..716a6a3efa 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validation.yml @@ -7,7 +7,9 @@ on: branches: [main] paths: - "domains/*" - - ".github/workflows/validation.yml" + - "tests/*" + - "utils/*" + - ".github/workflows/validation.yml" - "dnsconfig.js" workflow_dispatch: @@ -28,15 +30,12 @@ jobs: with: args: check - json: - name: JSON + tests: + name: Tests runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: JSON Syntax Check - uses: limitusus/json-syntax-check@v2 - with: - pattern: "\\.json$" - env: - BASE: "domains/" + - run: npm install + + - run: npm test diff --git a/dnsconfig.js b/dnsconfig.js index 6d169f44d6..c8f21c7930 100644 ---
a/dnsconfig.js +++ b/dnsconfig.js @@ -75,15 +75,18 @@ for (var subdomain in domains) { // Handle DS records if (domainData.record.DS) { - records.push( - DS( - subdomainName, - domainData.record.DS.key_tag, - domainData.record.DS.algorithm, - domainData.record.DS.digest_type, - domainData.record.DS.digest - ) - ); + for (var ds in domainData.record.DS) { + var dsRecord = domainData.record.DS[ds]; + records.push( + DS( + subdomainName, + dsRecord.key_tag, + dsRecord.algorithm, + dsRecord.digest_type, + dsRecord.digest + ) + ); + } } // Handle MX records @@ -151,7 +154,6 @@ var options = { var ignored = [ IGNORE("@", "MX,TXT"), - IGNORE("\\*"), IGNORE("_acme-challenge", "TXT"), IGNORE("_autodiscover._tcp", "SRV"), IGNORE("_dmarc", "TXT"), diff --git a/domains/sandbox.json b/domains/sandbox.json index 46f4fbc1e4..7c9301fc24 100644 --- a/domains/sandbox.json +++ b/domains/sandbox.json @@ -9,11 +9,13 @@ "blue.foundationdns.net", "blue.foundationdns.org" ], - "DS": { - "key_tag": 2371, - "algorithm": 13, - "digest_type": 2, - "digest": "023DD50C657C5F2471728B76127008F244CFB45F32AA0CE1978C0182D363EF12" - } + "DS": [ + { + "key_tag": 2371, + "algorithm": 13, + "digest_type": 2, + "digest": "023DD50C657C5F2471728B76127008F244CFB45F32AA0CE1978C0182D363EF12" + } + ] } } diff --git a/package.json b/package.json new file mode 100644 index 0000000000..858f3ddd51 --- /dev/null +++ b/package.json @@ -0,0 +1,9 @@ +{ + "devDependencies": { + "ava": "^6.2.0", + "fs-extra": "^11.2.0" + }, + "scripts": { + "test": "npx ava tests/*.test.js" + } +} diff --git a/tests/domains.test.js b/tests/domains.test.js new file mode 100644 index 0000000000..08af7bc1de --- /dev/null +++ b/tests/domains.test.js @@ -0,0 +1,40 @@ +const t = require("ava"); +const fs = require("fs-extra"); +const path = require("path"); + +const domainsPath = path.resolve("domains"); +const files = fs.readdirSync(domainsPath); + +// Nested subdomains should not exist if the parent subdomain does not exist +t("Nested 
subdomains should not exist without a parent subdomain", (t) => { + files.forEach((file) => { + const subdomain = file.replace(".json", ""); + + if (subdomain.split(".").length > 1) { + // Parent is everything after the first label (e.g. "a.b.c" -> "b.c"), + // not just the last label, so multi-level nesting is validated correctly. + const parentSubdomain = subdomain.split(".").slice(1).join("."); + + t.true( + files.includes(`${parentSubdomain}.json`), + `${file}: Parent subdomain does not exist` + ); + } + }); + + t.pass(); +}); + +// Nested subdomains should not exist if the parent subdomain has NS records +t("Nested subdomains should not exist if the parent subdomain has NS records", (t) => { + files.forEach((file) => { + const subdomain = file.replace(".json", ""); + + if (subdomain.split(".").length > 1) { + // Same parent derivation as above: strip only the first label. + const parentSubdomain = subdomain.split(".").slice(1).join("."); + const parentDomain = fs.readJsonSync(path.join(domainsPath, `${parentSubdomain}.json`)); + + t.is(parentDomain.record.NS, undefined, `${file}: Parent subdomain has NS records`); + } + }); + + t.pass(); +}); diff --git a/tests/json.test.js b/tests/json.test.js new file mode 100644 index 0000000000..e715c0e64a --- /dev/null +++ b/tests/json.test.js @@ -0,0 +1,107 @@ +const t = require("ava"); +const fs = require("fs-extra"); +const path = require("path"); + +const requiredFields = { + owner: "object", + record: "object", +}; + +const optionalFields = { + proxied: "boolean", + reserved: "boolean", +}; + +const requiredOwnerFields = { + username: "string", +}; + +const optionalOwnerFields = { + email: "string", +}; + +const emailRegex = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/; +const hostnameRegex = + /^(?=.{1,253}$)(?:(?:[_a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)\.)+[a-zA-Z]{2,63}$/; + +const domainsPath = path.resolve("domains"); +const files = fs.readdirSync(domainsPath); + +const validateRequiredFields = (t, obj, requiredFields, file) => { + Object.keys(requiredFields).forEach((key) => { + t.true(obj.hasOwnProperty(key), `${file}: Missing required field: ${key}`); + t.is( + typeof obj[key], + requiredFields[key], + `${file}: Field ${key} should
be of type ${requiredFields[key]}` + ); + }); +}; + +const validateOptionalFields = (t, obj, optionalFields, file) => { + Object.keys(optionalFields).forEach((key) => { + if (obj.hasOwnProperty(key)) { + t.is( + typeof obj[key], + optionalFields[key], + `${file}: Field ${key} should be of type ${optionalFields[key]}` + ); + } + }); +}; + +// Ensure all files are valid JSON +t("All files should be valid JSON", (t) => { + files.forEach((file) => { + t.notThrows(() => fs.readJsonSync(path.join(domainsPath, file)), `${file}: Invalid JSON file`); + }); +}); + +// Ensure all files have the required fields +t("All files should have valid file names", (t) => { + files.forEach((file) => { + t.true(file.endsWith(".json"), `${file}: File does not have .json extension`); + t.false(file.includes(".is-a.dev"), `${file}: File name should not contain .is-a.dev`); + + // Ignore root domain + if (file !== "@.json") { + t.regex( + file.replace(/\.json$/, "") + ".is-a.dev", + hostnameRegex, + `${file}: FQDN must be 1-253 characters, use letters, numbers, dots, or hyphens, and not start or end with a hyphen.` + ); + } + }); +}); + +// Ensure all files have the required fields +t("All files should have the required fields", (t) => { + files.forEach((file) => { + const data = fs.readJsonSync(path.join(domainsPath, file)); + + validateRequiredFields(t, data, requiredFields, file); + validateRequiredFields(t, data.owner, requiredOwnerFields, file); + + if (!data.reserved) { + t.true(Object.keys(data.record).length > 0, `${file}: No record types found`); + } + }); +}); + +// Validate the optional fields +t("All files should have valid optional fields", (t) => { + files.forEach((file) => { + const data = fs.readJsonSync(path.join(domainsPath, file)); + + validateOptionalFields(t, data, optionalFields, file); + validateOptionalFields(t, data.owner, optionalOwnerFields, file); + + if (data.owner.email) { + t.regex( + data.owner.email, + emailRegex, + `${file}: Owner email should be a valid 
email address` + ); + } + }); +}); diff --git a/tests/records.test.js b/tests/records.test.js new file mode 100644 index 0000000000..7a598a4b21 --- /dev/null +++ b/tests/records.test.js @@ -0,0 +1,290 @@ +const t = require("ava"); +const fs = require("fs-extra"); +const path = require("path"); + +const { expandIPv6, isPublicIPv4, isPublicIPv6 } = require("../utils/functions"); + +const validRecordTypes = ["A", "AAAA", "CAA", "CNAME", "DS", "MX", "NS", "SRV", "TXT", "URL"]; + +const hostnameRegex = + /^(?=.{1,253}$)(?:(?:[_a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)\.)+[a-zA-Z]{2,63}$/; +const ipv4Regex = + /^(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])(\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])){3}$/; +const ipv6Regex = + /^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$|^::(?:[0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}$|^(?:[0-9a-fA-F]{1,4}:){1,7}:$|^(?:[0-9a-fA-F]{1,4}:){0,6}::(?:[0-9a-fA-F]{1,4}:){0,5}[0-9a-fA-F]{1,4}$/; + +const domainsPath = path.resolve("domains"); +const files = fs.readdirSync(domainsPath); + +// Validate the record object key names +t("All files should have valid record types", (t) => { + files.forEach((file) => { + const data = fs.readJsonSync(path.join(domainsPath, file)); + + const recordKeys = Object.keys(data.record); + + recordKeys.forEach((key) => { + t.true(validRecordTypes.includes(key), `${file}: Invalid record type: ${key}`); + }); + + // CNAME records cannot be combined with any other record type + if (recordKeys.includes("CNAME")) { + t.is(recordKeys.length, Number(1), `${file}: CNAME records cannot be combined with other records`); + } + + // NS records cannot be combined with any other record type, except for DS records + if (recordKeys.includes("NS")) { + t.true( + recordKeys.length === 1 || recordKeys.length === 2 && recordKeys.includes("DS"), + `${file}: NS records cannot be combined with other records, except for DS records` + ); + } + + // DS records must be combined with NS records + if (recordKeys.includes("DS")) { + t.true( 
+ recordKeys.includes("NS"), + `${file}: DS records must be combined with NS records` + ); + } + }); +}); + +// Ensure there are no duplicate keys in the record object +t("All files should not have duplicate record keys", (t) => { + files.forEach((file) => { + const data = fs.readJsonSync(path.join(domainsPath, file)); + + const recordKeys = Object.keys(data.record); + const uniqueRecordKeys = new Set(recordKeys); + + t.is(recordKeys.length, uniqueRecordKeys.size, `${file}: Duplicate record keys found`); + }); +}); + +// Validate the values of the record object's keys +t("All files should have valid record values", (t) => { + files.forEach((file) => { + const data = fs.readJsonSync(path.join(domainsPath, file)); + + Object.keys(data.record).forEach((key) => { + const value = data.record[key]; + + // These records must be an array of strings + if (["A", "AAAA", "MX", "NS"].includes(key)) { + t.true(Array.isArray(value), `${file}: Record value should be an array for ${key}`); + + value.forEach((record) => { + t.true( + typeof record === "string", + `${file}: Record value should be a string for ${key}` + ); + }); + + if (key === "A") { + value.forEach((record) => { + t.regex( + record, + ipv4Regex, + `${file}: Record value should be a valid IPv4 address for ${key} at index ${value.indexOf( + record + )}` + ); + + t.true( + isPublicIPv4(record, data.proxied), + `${file}: Record value should be a public IPv4 address for ${key} at index ${value.indexOf( + record + )}` + ); + }); + } + + if (key === "AAAA") { + value.forEach((record) => { + t.regex( + expandIPv6(record), + ipv6Regex, + `${file}: Record value should be a valid IPv6 address for ${key} at index ${value.indexOf( + record + )}` + ); + + t.true( + isPublicIPv6(record), + `${file}: Record value should be a public IPv6 address for ${key} at index ${value.indexOf( + record + )}` + ); + }); + } + + if (["MX", "NS"].includes(key)) { + value.forEach((record) => { + t.regex( + record, + hostnameRegex, + `${file}: 
Record value should be a valid hostname for ${key} at index ${value.indexOf( + record + )}` + ); + }); + } + } + + // These records must be strings + if (["CNAME", "URL"].includes(key)) { + t.true( + typeof value === "string", + `${file}: Record value should be a string for ${key}` + ); + + if (key === "CNAME") { + t.regex( + value, + hostnameRegex, + `${file}: Record value should be a valid hostname for ${key}` + ); + } + + if (key === "URL") { + try { + new URL(value); + } catch (error) { + t.fail(`${file}: Record value should be a valid URL for ${key}`); + } + } + } + + // These records must be arrays of objects + if (["CAA", "DS", "SRV"].includes(key)) { + t.true(Array.isArray(value), `${file}: Record value should be an array for ${key}`); + + value.forEach((record) => { + t.true( + typeof record === "object", + `${file}: Record value should be an object for ${key} at index ${value.indexOf( + record + )}` + ); + }); + + if (key === "CAA") { + value.forEach((record) => { + t.true( + typeof record.flags === "number", + `${file}: CAA record value should have a number for flags at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.tag === "string", + `${file}: CAA record value should have a string for tag at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.value === "string", + `${file}: CAA record value should have a string for value at index ${value.indexOf( + record + )}` + ); + }); + } + + if (key === "DS") { + value.forEach((record) => { + t.true( + typeof record.key_tag === "number", + `${file}: DS record value should have a number for key_tag at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.algorithm === "number", + `${file}: DS record value should have a number for algorithm at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.digest_type === "number", + `${file}: DS record value should have a number for digest_type at index ${value.indexOf( + record + )}` + ); + + 
t.true( + typeof record.digest === "string", + `${file}: DS record value should have a string for digest at index ${value.indexOf( + record + )}` + ); + }); + } + + if (key === "SRV") { + value.forEach((record) => { + t.true( + typeof record.priority === "number", + `${file}: SRV record value should have a number for priority at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.weight === "number", + `${file}: SRV record value should have a number for weight at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.port === "number", + `${file}: SRV record value should have a number for port at index ${value.indexOf( + record + )}` + ); + + t.true( + typeof record.target === "string", + `${file}: SRV record value should have a string for target at index ${value.indexOf( + record + )}` + ); + + t.regex( + record.target, + hostnameRegex, + `${file}: SRV record value should be a valid hostname for target at index ${value.indexOf( + record + )}` + ); + }); + } + } + + // TXT records must be either a string or array of strings + if (key === "TXT") { + if (Array.isArray(value)) { + value.forEach((record) => { + t.true( + typeof record === "string", + `${file}: Record value should be a string for ${key} at index ${value.indexOf( + record + )}` + ); + }); + } else { + t.true( + typeof value === "string", + `${file}: Record value should be a string for ${key}` + ); + } + } + }); + }); +}); diff --git a/utils/functions.js b/utils/functions.js new file mode 100644 index 0000000000..ab00df1ee7 --- /dev/null +++ b/utils/functions.js @@ -0,0 +1,63 @@ +module.exports.expandIPv6 = function (ip) { + // Split into segments by ":" + let segments = ip.split(":"); + + // Count empty segments due to "::" shorthand + const emptyIndex = segments.indexOf(""); + if (emptyIndex !== -1) { + // Calculate how many "0000" are missing + const missingSegments = 7 - segments.filter((seg) => seg).length; + segments = [...segments.slice(0, emptyIndex), ...Array(missingSegments +
1).fill("0000"), ...segments.slice(emptyIndex + 1)].filter((seg) => seg); + } + + // Expand each segment to 4 characters, padding with leading zeros + const expandedSegments = segments.map((segment) => segment.padStart(4, "0")); + + // Join segments back together + return expandedSegments.join(":"); +}; + +module.exports.isPublicIPv4 = function (ip, proxied) { + const parts = ip.split('.').map(Number); + + // Validate IPv4 address format + if (parts.length !== 4 || parts.some(part => isNaN(part) || part < 0 || part > 255)) { + return false; + } + + // Exception for 192.0.2.1, assuming the domain is proxied + if (ip === "192.0.2.1" && proxied) { + return true; + } + + // Check for private and reserved IPv4 ranges + return !( + // Private ranges + parts[0] === 10 || + (parts[0] === 172 && parts[1] >= 16 && parts[1] <= 31) || + (parts[0] === 192 && parts[1] === 168) || + + // Reserved or special-use ranges + (parts[0] === 100 && parts[1] >= 64 && parts[1] <= 127) || // Carrier-grade NAT + (parts[0] === 169 && parts[1] === 254) || // Link-local + (parts[0] === 192 && parts[1] === 0 && parts[2] === 0) || // IETF Protocol Assignments + (parts[0] === 192 && parts[1] === 0 && parts[2] === 2) || // Documentation (TEST-NET-1) + (parts[0] === 198 && parts[1] === 18) || // Network Interconnect Devices + (parts[0] === 198 && parts[1] === 51 && parts[2] === 100) || // Documentation (TEST-NET-2) + (parts[0] === 203 && parts[1] === 0 && parts[2] === 113) || // Documentation (TEST-NET-3) + (parts[0] >= 224) // Multicast and reserved ranges + ); +}; + +module.exports.isPublicIPv6 = function (ip) { + const normalizedIP = ip.toLowerCase(); + + // Check for private or special-use IPv6 ranges + return !( + normalizedIP.startsWith("fc") || // Unique Local Address (ULA) + normalizedIP.startsWith("fd") || // Unique Local Address (ULA) + normalizedIP.startsWith("fe80") || // Link-local + normalizedIP === "::1" || // Loopback address (::1) + normalizedIP.startsWith("2001:db8") // Documentation range + ); +};