Backport docs to config schemas (#369)
* Backport docs to config schemas

* Bump version
einarmo authored Jan 30, 2024
1 parent 73a78dc commit 09886ef
Showing 9 changed files with 271 additions and 109 deletions.
13 changes: 11 additions & 2 deletions schema/base_config.schema.json
@@ -10,7 +10,10 @@
},
"type": {
"type": "string",
"enum": ["local", "remote"],
"enum": [
"local",
"remote"
],
"default": "local",
"description": "Configuration file type. Either `local`, meaning the full config is loaded from this file, or `remote`, which means that only the `cognite` section is loaded from this file, and the rest is loaded from extraction pipelines."
},
@@ -29,7 +32,13 @@
"state-store": {
"$ref": "state_store_config.schema.json",
"unevaluatedProperties": false
},
"key-vault": {
"$ref": "key_vault_config.schema.json",
"unevaluatedProperties": false
}
},
"required": ["version"]
"required": [
"version"
]
}
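For reference, a minimal YAML configuration file accepted by this schema might look like the sketch below. The key names (`version`, `type`, `cognite`) are taken from the schema above; the concrete values are illustrative assumptions.

version: 1          # required; the value shown here is an assumption
type: remote        # optional; `local` (default) loads the full config from this file
cognite:            # with `type: remote`, only this section is read from the file;
                    # the rest is loaded from extraction pipelines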
162 changes: 98 additions & 64 deletions schema/cognite_config.schema.json

Large diffs are not rendered by default.

45 changes: 32 additions & 13 deletions schema/high_availability.schema.json
@@ -2,49 +2,68 @@
"$id": "high_availability.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Configuration to allow you to run multiple redundant extractors. Each extractor needs a unique index.",
"description": "Configuration for running the extractor with a rudimentary form of redundancy. Multiple extractors on different machines are on standby, with one actively extracting from the source. Each extractor must have a unique `index`.",
"properties": {
"index": {
"type": "integer",
"description": "Unique index of this extractor. Each redundant extractor must have a unique index",
"description": "Unique index of this extractor. Indices must be unique, or high availability will not work correctly.",
"minimum": 0
},
"raw": {
"type": "object",
"description": "Configuration to use Raw as backend for high availability",
"description": "Use the CDF staging area as a shared store for the extractor. This configuration must be the same for each redundant extractor.",
"properties": {
"database-name": {
"type": "string",
"description": "Raw database to store high availability states in"
"description": "Name of the database in CDF."
},
"table-name": {
"type": "string",
"description": "Raw table to store high availability states in"
"description": "Name of the table in CDF."
}
},
"required": ["database-name", "table-name"],
"required": [
"database-name",
"table-name"
],
"unevaluatedProperties": false
},
"redis": {
"type": "object",
"description": "Configuration to use a Redis store as backend for high availability",
"description": "Use a redis store as shared state for the extractor. This configuration must be the same for each redundant extractor.",
"properties": {
"connection-string": {
"type": "string",
"description": "Connection string to connect to redis instance"
"description": "Redis connection string.",
"examples": [
"HOST_NAME:PORT_NUMBER,password=PASSWORD"
]
},
"table-name": {
"type": "string",
"description": "Redis table name to store high availability states in"
"description": "Redis table name."
}
},
"required": ["connection-string", "table-name"],
"required": [
"connection-string",
"table-name"
],
"unevaluatedProperties": false
}
},
"oneOf": [
{ "required": ["raw"] },
{ "required": ["redis"] }
{
"required": [
"raw"
]
},
{
"required": [
"redis"
]
}
],
"required": ["index"]
"required": [
"index"
]
}
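To make the `oneOf` constraint above concrete: exactly one of `raw` or `redis` must be configured, and `index` is always required. A hedged sketch for two redundant extractors sharing a RAW table follows (the `high-availability` key name is inferred from the schema file name; database and table names are placeholders):

high-availability:
  index: 0                          # the second extractor would use index: 1
  raw:                              # alternatively a `redis` block, but not both
    database-name: extractor-state  # placeholder CDF database name
    table-name: ha-state            # placeholder CDF table name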
35 changes: 35 additions & 0 deletions schema/key_vault_config.schema.json
@@ -0,0 +1,35 @@
{
"$id": "state_store_config.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Configure access to an Azure KeyVault instance. If this is configured, you can use the `!keyvault` tag on entries to dynamically replace values with secrets from KeyVault when the configuration is loaded. For example: `secret: !keyvault my-secret`.",
"properties": {
"keyvault-name": {
"type": "string",
"description": "Enter the name of the Azure KeyVault to use.",
"examples": [
"my-keyvault"
]
},
"authentication-method": {
"type": "string",
"description": "Method used to authenticate with KeyVault. If this is set to `client-secret`, both `client-id` and `secret` are required."
},
"tenant-id": {
"type": "string",
"description": "Enter the Azure tenant containing the KeyVault."
},
"client-id": {
"type": "string",
"description": "Client ID of the service principal used to access the KeyVault."
},
"secret": {
"type": "string",
"description": "Client secret for the service principal used to access the KeyVault."
}
},
"required": [
"tenant-id",
"authentication-method"
]
}
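A sketch of how this section combines with the `!keyvault` tag described above (the vault name, tenant, and credentials are placeholders; the `secret: !keyvault my-secret` usage is the example given in the schema description):

key-vault:
  keyvault-name: my-keyvault              # placeholder vault name
  authentication-method: client-secret    # with this method, client-id and secret are required
  tenant-id: 00000000-0000-0000-0000-000000000000   # placeholder tenant
  client-id: some-client-id
  secret: some-client-secret

# Elsewhere in the config (placement illustrative), values can then be
# resolved from the vault when the configuration is loaded:
secret: !keyvault my-secret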
60 changes: 49 additions & 11 deletions schema/logger_config.schema.json
@@ -2,24 +2,40 @@
"$id": "logger_config.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Configuration for logging to console or file.",
"description": "Configuration for logging to console or file. Log entries are either `Fatal`, `Error`, `Warning`, `Information`, `Debug`, or `Verbose`, in order of decreasing priority. The extractor will log any messages at an equal or higher log level than the configured level for each sink.",
"properties": {
"console": {
"type": "object",
"description": "Configuration for logging to the console.",
"properties": {
"level": {
"type": "string",
"description": "Minimum level of log events to write to the console.",
"enum": ["verbose", "debug", "information", "warning", "error", "fatal"]
"description": "Minimum level of log events to write to the console. If not present, or invalid, logging to console is disabled.",
"enum": [
"verbose",
"debug",
"information",
"warning",
"error",
"fatal"
]
},
"stderr-level": {
"type": "string",
"description": "Log events at this level or above are redirected to standard error.",
"enum": ["verbose", "debug", "information", "warning", "error", "fatal"]
"enum": [
"verbose",
"debug",
"information",
"warning",
"error",
"fatal"
]
}
},
"required": ["level"],
"required": [
"level"
],
"unevaluatedProperties": false
},
"file": {
@@ -29,7 +45,14 @@
"level": {
"type": "string",
"description": "Minimum level of log events to write to file.",
"enum": ["verbose", "debug", "information", "warning", "error", "fatal"]
"enum": [
"verbose",
"debug",
"information",
"warning",
"error",
"fatal"
]
},
"path": {
"type": "string",
@@ -43,12 +66,18 @@
},
"rolling-interval": {
"type": "string",
"enum": ["day", "hour"],
"description": "Rolling interval for log files. Either `day` or `hour`",
"enum": [
"day",
"hour"
],
"description": "Rolling interval for log files.",
"default": "day"
}
},
"required": ["level", "path"],
"required": [
"level",
"path"
],
"unevaluatedProperties": false
},
"trace-listener": {
@@ -57,11 +86,20 @@
"properties": {
"level": {
"type": "string",
"enum": ["verbose", "debug", "information", "warning", "error", "fatal"],
"enum": [
"verbose",
"debug",
"information",
"warning",
"error",
"fatal"
],
"description": "Level to output trace messages at"
}
},
"required": ["level"],
"required": [
"level"
],
"unevaluatedProperties": false
}
}
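Putting the sinks together, a typical `logger` section might look like this sketch (the `logger` key name matches the schema file name; the file path is a placeholder):

logger:
  console:
    level: information       # required when the console sink is configured
    stderr-level: error      # error and fatal entries are redirected to stderr
  file:
    level: debug
    path: logs/extractor.log # placeholder path
    rolling-interval: day    # default; a new log file per day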
31 changes: 21 additions & 10 deletions schema/metrics_config.schema.json
@@ -2,40 +2,48 @@
"$id": "metrics_config.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Configuration for prometheus metrics destinations.",
"description": "Configuration for publishing metrics.",
"properties": {
"server": {
"type": "object",
"description": "Configuration for a prometheus scrape server.",
"description": "Configuration for having the extractor start a Prometheus scrape server on a local port.",
"properties": {
"host": {
"type": "string",
"description": "Host name for the server.",
"examples": ["localhost"]
"description": "Host name for local Prometheus server, must be exposed to some prometheus instance for scraping.",
"examples": [
"localhost",
"0.0.0.0"
]
},
"port": {
"type": "integer",
"description": "Port to host the prometheus scrape server on"
"description": "The port used for a local Prometheus server."
}
},
"required": ["host", "port"],
"required": [
"host",
"port"
],
"unevaluatedProperties": false
},
"push-gateways": {
"type": "array",
"uniqueItems": true,
"description": "A list of push gateway destinations to push metrics to",
"description": "A list of pushgateway destinations to push metrics to. The extractor will automatically push metrics to each of these.",
"items": {
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "URI of the pushgateway host",
"examples": ["http://localhost:9091"]
"examples": [
"http://my.pushgateway:9091"
]
},
"job": {
"type": "string",
"description": "Name of the job"
"description": "Name of the Prometheus pushgateway job."
},
"username": {
"type": "string",
@@ -51,7 +59,10 @@
"description": "Interval in seconds between each push to the gateway"
}
},
"required": ["host", "job"],
"required": [
"host",
"job"
],
"unevaluatedProperties": false
}
}
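As an illustration, a `metrics` section enabling both the local scrape server and one pushgateway destination might look like the following sketch (the port and job name are placeholders; the pushgateway `host` reuses the example from the schema above):

metrics:
  server:
    host: 0.0.0.0            # expose the scrape endpoint beyond localhost
    port: 9000               # placeholder port
  push-gateways:
    - host: http://my.pushgateway:9091
      job: my-extractor      # placeholder job name
      # optional fields such as credentials and the push interval are
      # described in the schema above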
18 changes: 14 additions & 4 deletions schema/retry_config.schema.json
@@ -1,10 +1,14 @@
{
"$id": "retry_config.schema.json",
"description": "Retry configuration",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "Configuration for retries towards the source.",
"type": "object",
"properties": {
"timeout": {
"type": [ "string", "integer" ],
"type": [
"string",
"integer"
],
"description": "Global timeout. After this much time has passed, new retries will not be created. Set this to zero for no timeout. Syntax is `N[timeUnit]` where `timeUnit` is `d`, `h`, `m`, `s` or `ms`",
"default": "0s"
},
@@ -14,12 +18,18 @@
"default": 5
},
"max-delay": {
"type": [ "string", "integer" ],
"type": [
"string",
"integer"
],
"description": "Maximum delay between attempts, incremented using exponential backoff. Set this to 0 for no upper limit. Syntax is `N[timeUnit]` where `timeUnit` is `d`, `h`, `m`, `s` or `ms`",
"default": "0s"
},
"initial-delay": {
"type": [ "string", "integer" ],
"type": [
"string",
"integer"
],
"description": "Initial delay used for exponential backoff. Time between each retry is calculated as `min(max-delay, initial-delay * 2 ^ retry)`, where 0 is treated as infinite for `max-delay`. The maximum delay is about 10 minutes (13 retries). Syntax is `N[timeUnit]` where `timeUnit` is `d`, `h`, `m`, `s` or `ms`",
"default": "500ms"
}
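To make the backoff formula concrete: with `initial-delay: 500ms`, the delay before retry `n` is `min(max-delay, 500ms * 2^n)`, so the waits grow as 500ms, 1s, 2s, 4s, and so on until capped. A sketch using only fields named in this diff (where this section is mounted in the full config, and under which key, is extractor-specific and assumed here):

retry:                  # key name is an assumption
  timeout: 80s          # stop creating new retries after 80 seconds in total
  initial-delay: 500ms  # default
  max-delay: 30s        # caps the sequence at 500ms, 1s, 2s, 4s, 8s, 16s, 30s, 30s, ...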
14 changes: 10 additions & 4 deletions schema/state_store_config.schema.json
@@ -2,18 +2,24 @@
"$id": "state_store_config.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Store state in a local database or in CDF raw to start the extractor more efficiently",
"description": "Use a local LiteDb database or a set of tables in CDF RAW to store persistent information between runs. This can be used to avoid loading large volumes of data from CDF on startup, which can greatly speed up the extractor.",
"properties": {
"location": {
"type": "string",
"description": "Path to .db file, or name of raw database containing buffer."
"description": "Path to .db file used for storage, or name of a CDF RAW database."
},
"database": {
"type": "string",
"enum": ["None", "LiteDb", "Raw"],
"enum": [
"None",
"LiteDb",
"Raw"
],
"default": "None",
"description": "Which type of database to use."
}
},
"required": ["location"]
"required": [
"location"
]
}
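For reference, the two backends map onto `location` as in this hedged sketch (the file name is a placeholder):

state-store:
  location: state.db   # a .db file path for LiteDb, or a CDF RAW database name
  database: LiteDb     # one of None (default), LiteDb, or Raw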
2 changes: 1 addition & 1 deletion version
@@ -1 +1 @@
1.18.1
1.18.2
