diff --git a/schema/base_config.schema.json b/schema/base_config.schema.json
index 30ed6882..54eaa9b7 100644
--- a/schema/base_config.schema.json
+++ b/schema/base_config.schema.json
@@ -18,7 +18,7 @@
         "remote"
       ],
       "default": "local",
-      "description": "Configuration file type. Either `local`, meaning the full config is loaded from this file, or `remote`, which means that only the `cognite` section is loaded from this file, and the rest is loaded from extraction pipelines."
+      "description": "Configuration file type. The `local` option loads the full config from this file, while the `remote` option loads only the `cognite` section from this file and the rest from extraction pipelines."
     },
     "cognite": {
       "$ref": "cognite_config.schema.json",
diff --git a/schema/cognite_config.schema.json b/schema/cognite_config.schema.json
index eb6c4e91..96ad3412 100644
--- a/schema/cognite_config.schema.json
+++ b/schema/cognite_config.schema.json
@@ -19,7 +19,7 @@
         },
         "client-id": {
           "type": "string",
-          "description": "Enter the service principal client id from the IdP."
+          "description": "Enter the service principal client ID from the IdP."
         },
         "tenant": {
           "type": "string",
@@ -43,20 +43,20 @@
         },
         "scopes": {
           "type": "array",
-          "description": "Enter a list of scopes requested for the token",
+          "description": "Enter the list of scopes requested for the token.",
           "items": {
             "type": "string",
-            "description": "A scope requested for the token"
+            "description": "The scope requested for the token"
           }
         },
        "min-ttl": {
          "type": "integer",
          "default": 30,
-          "description": "Insert the minimum time in seconds a token will be valid. If the cached token expires in less than `min-ttl` seconds, it will be refreshed even if it is still valid."
+          "description": "Insert the minimum time in seconds a token will be valid. If the cached token expires in less than `min-ttl` seconds, the system will refresh the token, even if it's still valid."
        },
        "certificate": {
          "type": "object",
-          "description": "Authenticate with a client certificate",
+          "description": "Authenticate with a client certificate.",
          "unevaluatedProperties": false,
          "required": [
            "path"
@@ -68,7 +68,7 @@
            },
            "path": {
              "type": "string",
-              "description": "Enter the path to the .pem or .pfx certificate to be used for authentication"
+              "description": "Enter the path to the .pem or .pfx certificate to be used for authentication."
            },
            "password": {
              "type": "string",
@@ -102,11 +102,11 @@
       ]
     },
     "data-set": {
-      "description": "Enter a data set the extractor should write data into",
+      "description": "Enter a data set the extractor should write data into.",
       "$ref": "either_id.schema.json"
     },
     "extraction-pipeline": {
-      "description": "Enter the extraction pipeline used for remote config and reporting statuses",
+      "description": "Enter the extraction pipeline used for remote config and reporting statuses.",
       "$ref": "either_id.schema.json"
     },
     "host": {
@@ -116,12 +116,12 @@
     },
     "timeout": {
       "type": "integer",
-      "description": "Enter the timeout on requests to CDF, in seconds.",
+      "description": "Enter the timeout on requests to CDF in seconds.",
       "default": 30
     },
     "external-id-prefix": {
       "type": "string",
-      "description": "Prefix on external ID used when creating CDF resources"
+      "description": "Prefix on external ID used when creating CDF resources."
     },
     "connection": {
       "type": "object",
@@ -130,7 +130,7 @@
         "disable-gzip": {
           "type": "boolean",
           "default": false,
-          "description": "Whether or not to disable gzipping of json bodies."
+ "description": "Set to `true` to disable gzipping of json bodies." }, "status-forcelist": { "type": "string", @@ -139,34 +139,34 @@ "max-retries": { "type": "integer", "default": 10, - "description": "Max number of retries on a given http request." + "description": "Maximum number of retries on a given HTTP request." }, "max-retries-connect": { "type": "integer", "default": 3, - "description": "Max number of retries on connection errors." + "description": "Maximum number of retries on connection errors." }, "max-retry-backoff": { "type": "integer", "default": 30, - "description": "Retry strategy employs exponential backoff. This parameter sets a max on the amount of backoff after any request failure." + "description": "Retry strategy employs exponential backoff. This parameter sets a maximum amount of backoff after any request failure." }, "max-connection-pool-size": { "type": "integer", "default": 50, - "description": "The maximum number of connections which will be kept in the SDKs connection pool." + "description": "The maximum number of connections in the SDKs connection pool." }, "disable-ssl": { "type": "boolean", "default": false, - "description": "Whether or not to disable SSL verification." + "description": "Set to `true` to disable SSL verification." }, "proxies": { "type": "object", - "description": "Dictionary mapping from protocol to url.", + "description": "Dictionary mapping from protocol to URL.", "items": { "type": "string", - "description": "Provide protocol as key and value as the corresponding url" + "description": "Provide protocol as key and value as the corresponding URL." } } } diff --git a/schema/either_id.schema.json b/schema/either_id.schema.json index a588b186..53634eb7 100644 --- a/schema/either_id.schema.json +++ b/schema/either_id.schema.json @@ -7,11 +7,11 @@ "properties": { "id": { "type": "integer", - "description": "Resource internal id" + "description": "Resource internal ID" }, "external-id": { "type": "string", - "description": "Resource external id" + "description": "Resource external ID" } } } diff --git a/schema/logging_config.schema.json b/schema/logging_config.schema.json index 9632cd38..a5c2a5c3 100644 --- a/schema/logging_config.schema.json +++ b/schema/logging_config.schema.json @@ -11,7 +11,7 @@ "properties": { "level": { "type": "string", - "description": "Select the verbosity level for console logging. Valid options, in decreasing verbosity levels, are `DEBUG`, `INFO`, `WARNING`, `ERROR`, and `CRITICAL`.", + "description": "Select the verbosity level for console logging. To reduce the verbosity levels, use `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`.", "enum": [ "DEBUG", "INFO", @@ -33,7 +33,7 @@ "properties": { "level": { "type": "string", - "description": "Select the verbosity level for file logging. Valid options, in decreasing verbosity levels, are `DEBUG`, `INFO`, `WARNING`, `ERROR`, and `CRITICAL`.", + "description": "Select the verbosity level for file logging. To reduce the verbosity levels, use `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`.", "enum": [ "DEBUG", "INFO", @@ -49,14 +49,14 @@ }, "retention": { "type": "integer", - "description": "Specify the number of days to keep logs for.", + "description": "Specify the number of days to keep logs.", "default": 7 } } }, "metrics": { "type": "boolean", - "description": "Enables metrics on the number of log messages recorded per logger and level. 
This requires `metrics` to be configured as well" + "description": "Enables metrics on the number of log messages recorded per logger and level. Configure `metrics` to retrieve the logs." } } } diff --git a/schema/metrics_config.schema.json b/schema/metrics_config.schema.json index 76d9324a..8254312f 100644 --- a/schema/metrics_config.schema.json +++ b/schema/metrics_config.schema.json @@ -1,39 +1,39 @@ { "$id": "metrics_config.schema.json", "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "The `metrics` section describes where to send metrics on extractor performance for remote monitoring of the extractor. We recommend sending metrics to a [Prometheus pushgateway](https://prometheus.io/), but you can also send metrics as time series in the CDF project.", + "description": "The `metrics` section describes where to send metrics on extractor performance for remote monitoring of the extractor. We recommend sending metrics to a [Prometheus pushgateway](https://prometheus.io/). You can also send metrics as time series in the CDF project.", "type": "object", "properties": { "push-gateways": { "type": "array", - "description": "List of prometheus pushgateway configurations", + "description": "List of Prometheus pushgateway configurations", "items": { "type": "object", - "description": "The push-gateways sections contain a list of metric destinations.", + "description": "The `push-gateways` sections contain a list of metric destinations.", "unevaluatedProperties": false, "properties": { "host": { "type": "string", - "description": "Enter the address of the host to push metrics to." + "description": "Enter the host's address to push metrics." }, "job-name": { "type": "string", - "description": "Enter the value of the `exported_job` label to associate metrics with. This separates several deployments on a single pushgateway, and should be unique." + "description": "Enter the value of the `exported_job` label to associate metrics. This separates several deployments on a single pushgateway and should be unique." }, "username": { "type": "string", - "description": "Enter the credentials for the pushgateway." + "description": "Enter the username for the pushgateway." }, "password": { "type": "string", - "description": "Enter the credentials for the pushgateway." + "description": "Enter the password for the pushgateway." }, "clear-after": { "type": [ "null", "integer" ], - "description": "Enter the number of seconds to wait before clearing the pushgateway. When this parameter is present, the extractor will stall after the run is complete before deleting all metrics from the pushgateway. The recommended value is at least twice that of the scrape interval on the pushgateway. This is to ensure that the last metrics are gathered before the deletion. Default is disabled." + "description": "Set a wait time in seconds before clearing the pushgateway. When this parameter is present, the extractor will stall after the run is complete before deleting all metrics from the pushgateway. The recommended value is at least twice that of the scrape interval on the pushgateway to ensure that the last metrics are gathered before the deletion. By default, this feature is disabled." }, "push-interval": { "type": "integer", @@ -45,7 +45,7 @@ }, "cognite": { "type": "object", - "description": "Push metrics to CDF timeseries. Requires CDF credentials to be configured", + "description": "Push metrics to CDF time series. 
Configure CDF credentials.", "unevaluatedProperties": false, "required": [ "external-id-prefix" @@ -61,32 +61,32 @@ }, "asset-external-id": { "type": "string", - "description": "Enter the external ID for a CDF asset that will have all the metrics time series attached to it." + "description": "Enter the external ID for a CDF asset with all the metrics time series attached to it." }, "push-interval": { "type": "integer", - "description": "Enter the interval in seconds between each push to CDF", + "description": "Enter the interval in seconds between each push to CDF.", "default": 30 }, "data-set": { - "description": "Data set the metrics will be created under", + "description": "The data set where the metrics will be created.", "$ref": "either_id.schema.json" } } }, "server": { "type": "object", - "description": "The extractor can also be configured to expose a HTTP server with prometheus metrics for scraping", + "description": "Configure the extractor to expose an HTTP server with Prometheus metrics for scraping.", "unevaluatedProperties": false, "properties": { "host": { "type": "string", - "description": "Host to run the prometheus server on", + "description": "Host to run the Prometheus server.", "default": "0.0.0.0" }, "port": { "type": "integer", - "description": "Local port to expose the prometheus server on", + "description": "Local port to expose the Prometheus server.", "default": 9000 } } diff --git a/schema/state_store_config.schema.json b/schema/state_store_config.schema.json index 156636a7..1eff5f21 100644 --- a/schema/state_store_config.schema.json +++ b/schema/state_store_config.schema.json @@ -1,7 +1,7 @@ { "$id": "state_store_config.schema.json", "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "Include the state store section to save extraction states between runs. Use this if data is loaded incrementally. We support multiple state stores, but you can only configure one at a time.", + "description": "Include the state store section to save extraction states between runs. Use a state store if data is loaded incrementally. We support multiple state stores, but you can only configure one at a time.", "type": "object", "properties": { "raw": {
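
Taken together, these schemas describe the shared extractor configuration layout. For reference, a YAML config file accepted by these schemas might look roughly like the sketch below. All values are placeholders, and any key not visible in the diff above (for example the `logging` section name and its `console`/`file` sub-sections) is an assumption based on the schema descriptions, not something this change defines.

# Sketch of a possible config file; all values are placeholders.
type: local

cognite:
  host: https://api.cognitedata.com
  external-id-prefix: "example:"     # prefix applied to external IDs of created CDF resources
  data-set:
    external-id: example-data-set    # either `id` or `external-id` (see either_id.schema.json)
  timeout: 30                        # request timeout in seconds
  connection:
    max-retries: 10
    disable-gzip: false

logging:                             # section name assumed; not shown in this diff
  console:
    level: INFO                      # DEBUG, INFO, WARNING, ERROR or CRITICAL
  file:
    level: DEBUG
    retention: 7                     # days to keep log files

metrics:
  push-gateways:
    - host: https://pushgateway.example.com
      job-name: example-extractor   # must be unique per deployment
      push-interval: 30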