diff --git a/CHANGELOG.md b/CHANGELOG.md
index a8f52b38c0..968274efb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -71,7 +71,7 @@ Changes are grouped as follows
 This release ensures that all CogniteResources have `.dump` and `.load` methods, and that calling these two
 methods in sequence produces an object equal to the original, for example,
 `my_asset == Asset.load(my_asset.dump(camel_case=True))`. In addition, this ensures that the output of all `.dump`
-methods are `json` and `yaml` serializable.
+methods is `json` and `yaml` serializable. Additionally, the default for `camel_case` has been changed to `True`.
 ### Improved
 - Read operations, like `retrieve_multiple`, will now fast-fail. Previously, all requests would be executed
@@ -93,6 +93,8 @@ methods are `json` and `yaml` serializable.
   and replaced by properties with the same names.
 ### Changed
+- All `.dump` methods now use `camel_case=True` by default. This matches the intended use case: preparing the
+  object to be sent in an API request.
 - `CogniteResource.to_pandas` now more closely resembles `CogniteResourceList.to_pandas` with parameters
   `expand_metadata` and `metadata_prefix`, instead of accepting a sequence of column names (`expand`) to expand,
   with no easy way to add a prefix. Also, it no longer expands metadata by default.
diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md
index 82144845a6..38a441937f 100644
--- a/MIGRATION_GUIDE.md
+++ b/MIGRATION_GUIDE.md
@@ -39,6 +39,8 @@ Changes are grouped as follows:
 - In data class `Transformation` attributes `has_source_oidc_credentials` and `has_destination_oidc_credentials`
   are replaced by properties.
 ### Function Signature
+- All `.dump` methods for CogniteResource classes like `Asset` or `Event` now use `camel_case=True` by default. This
+  matches the intended use case: preparing the object to be sent in an API request.
 - `CogniteResource.to_pandas` now more closely resembles `CogniteResourceList.to_pandas` with parameters
   `expand_metadata` and `metadata_prefix`, instead of accepting a sequence of column names (`expand`) to expand,
   with no easy way to add a prefix. Also, it no longer expands metadata by default.
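The round-trip guarantee and the new `camel_case` default described above can be demonstrated directly. A minimal sketch, assuming a placeholder `Asset` (the field values are made up for illustration):

```python
from cognite.client.data_classes import Asset

my_asset = Asset(name="pump-01", external_id="pump-01")

# dump() now defaults to camel_case=True, producing API-ready keys
# such as "externalId" instead of "external_id".
dumped = my_asset.dump()
assert "externalId" in dumped

# load() inverts dump(): the round trip yields an equal object.
assert my_asset == Asset.load(dumped)
```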
diff --git a/cognite/client/_api/assets.py b/cognite/client/_api/assets.py
index 63526108be..6f75735787 100644
--- a/cognite/client/_api/assets.py
+++ b/cognite/client/_api/assets.py
@@ -296,10 +296,10 @@ def aggregate_count(
             Count the number of assets with the metadata key "timezone" in your CDF project:

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes.filters import ContainsAny
                 >>> from cognite.client.data_classes.assets import AssetProperty
                 >>> c = CogniteClient()
-                >>> has_timezone = filters.ContainsAny(AssetProperty.metadata, "timezone")
+                >>> has_timezone = ContainsAny(AssetProperty.metadata, "timezone")
                 >>> asset_count = c.assets.aggregate_count(advanced_filter=has_timezone)

@@ -340,11 +340,13 @@ def aggregate_cardinality_values(
             Count the number of timezones (metadata key) for assets with the word "critical" in the description
             in your CDF project:

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes.filters import Search
                 >>> from cognite.client.data_classes.assets import AssetProperty
                 >>> c = CogniteClient()
-                >>> is_critical = filters.Search(AssetProperty.description, "critical")
-                >>> critical_assets = c.assets.aggregate_cardinality_values(AssetProperty.metadata_key("timezone"), advanced_filter=is_critical)
+                >>> is_critical = Search(AssetProperty.description, "critical")
+                >>> critical_assets = c.assets.aggregate_cardinality_values(
+                ...     AssetProperty.metadata_key("timezone"),
+                ...     advanced_filter=is_critical)
         """
         self._validate_filter(advanced_filter)
         return self._advanced_aggregate(
@@ -425,12 +427,12 @@ def aggregate_unique_values(
             Get the different labels with count used for assets created after 2020-01-01 in your CDF project:

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes import filters as flt
                 >>> from cognite.client.data_classes.assets import AssetProperty
                 >>> from cognite.client.utils import timestamp_to_ms
                 >>> from datetime import datetime
                 >>> c = CogniteClient()
-                >>> created_after_2020 = filters.Range(AssetProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+                >>> created_after_2020 = flt.Range(AssetProperty.created_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
                 >>> result = c.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020)
                 >>> print(result.unique)

@@ -439,10 +441,11 @@ def aggregate_unique_values(

                 >>> from cognite.client import CogniteClient
                 >>> from cognite.client.data_classes.assets import AssetProperty
-                >>> from cognite.client.data_classes import aggregations as aggs, filters
+                >>> from cognite.client.data_classes import aggregations as aggs
+                >>> from cognite.client.data_classes import filters as flt
                 >>> c = CogniteClient()
                 >>> not_test = aggs.Not(aggs.Prefix("test"))
-                >>> created_after_2020 = filters.Range(AssetProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
+                >>> created_after_2020 = flt.Range(AssetProperty.last_updated_time, gte=timestamp_to_ms(datetime(2020, 1, 1)))
                 >>> result = c.assets.aggregate_unique_values(AssetProperty.labels, advanced_filter=created_after_2020, aggregate_filter=not_test)
                 >>> print(result.unique)
@@ -839,12 +842,10 @@ def filter(
             and sort by external id ascending:

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes import filters as flt
                 >>> c = CogniteClient()
-                >>> f = filters
-                >>> in_timezone = f.Prefix(["metadata", "timezone"], "Europe")
-                >>> res = c.assets.filter(filter=in_timezone,
-                ...     sort=("external_id", "asc"))
+                >>> in_timezone = flt.Prefix(["metadata", "timezone"], "Europe")
+                >>> res = c.assets.filter(filter=in_timezone, sort=("external_id", "asc"))

             Note that you can check the API documentation above to see which properties you can filter on
             with which filters.
@@ -853,13 +854,13 @@
             for filtering and sorting, you can also use the `AssetProperty` and `SortableAssetProperty` Enums.

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes import filters as flt
                 >>> from cognite.client.data_classes.assets import AssetProperty, SortableAssetProperty
                 >>> c = CogniteClient()
-                >>> f = filters
-                >>> in_timezone = f.Prefix(AssetProperty.metadata_key("timezone"), "Europe")
-                >>> res = c.assets.filter(filter=in_timezone,
-                ...     sort=(SortableAssetProperty.external_id, "asc"))
+                >>> in_timezone = flt.Prefix(AssetProperty.metadata_key("timezone"), "Europe")
+                >>> res = c.assets.filter(
+                ...     filter=in_timezone,
+                ...     sort=(SortableAssetProperty.external_id, "asc"))
         """
         self._validate_filter(filter)
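A hedged composition of the filter pieces shown above, combining two leaf filters with `flt.And` before aggregating (the filter values are placeholders, not taken from this diff):

```python
from cognite.client import CogniteClient
from cognite.client.data_classes import filters as flt
from cognite.client.data_classes.assets import AssetProperty

c = CogniteClient()
is_critical = flt.Search(AssetProperty.description, "critical")
has_timezone = flt.ContainsAny(AssetProperty.metadata, "timezone")

# Count only the assets matching both conditions.
count = c.assets.aggregate_count(advanced_filter=flt.And(is_critical, has_timezone))
```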
diff --git a/cognite/client/_api/data_modeling/instances.py b/cognite/client/_api/data_modeling/instances.py
index 3a808e81f2..adc22c6f82 100644
--- a/cognite/client/_api/data_modeling/instances.py
+++ b/cognite/client/_api/data_modeling/instances.py
@@ -292,30 +292,30 @@ def retrieve(

                 >>> from cognite.client import CogniteClient
                 >>> c = CogniteClient()
-                >>> res = c.data_modeling.instances.retrieve(nodes=("mySpace", "myNodeExternalId"),
-                ...     edges=("mySpace", "myEdgeExternalId"),
-                ...     sources=("mySpace", "myViewExternalId", "myViewVersion")
-                ... )
+                >>> res = c.data_modeling.instances.retrieve(
+                ...     nodes=("mySpace", "myNodeExternalId"),
+                ...     edges=("mySpace", "myEdgeExternalId"),
+                ...     sources=("mySpace", "myViewExternalId", "myViewVersion"))

             Retrieve nodes and edges using the built-in data classes:

                 >>> from cognite.client import CogniteClient
                 >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId, ViewId
                 >>> c = CogniteClient()
-                >>> res = c.data_modeling.instances.retrieve(NodeId("mySpace", "myNode"),
-                ...     EdgeId("mySpace", "myEdge"),
-                ...     ViewId("mySpace", "myViewExternalId", "myViewVersion")
-                ... )
+                >>> res = c.data_modeling.instances.retrieve(
+                ...     NodeId("mySpace", "myNode"),
+                ...     EdgeId("mySpace", "myEdge"),
+                ...     ViewId("mySpace", "myViewExternalId", "myViewVersion"))

             Retrieve nodes and edges using the view object as source:

                 >>> from cognite.client import CogniteClient
                 >>> from cognite.client.data_classes.data_modeling import NodeId, EdgeId
                 >>> c = CogniteClient()
-                >>> res = c.data_modeling.instances.retrieve(NodeId("mySpace", "myNode"),
-                ...     EdgeId("mySpace", "myEdge"),
-                ...     sources=("myspace", "myView")
-                ... )
+                >>> res = c.data_modeling.instances.retrieve(
+                ...     NodeId("mySpace", "myNode"),
+                ...     EdgeId("mySpace", "myEdge"),
+                ...     sources=("myspace", "myView"))
         """
         identifiers = self._load_node_and_edge_ids(nodes, edges)
         other_params = self._create_other_params(
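A hedged variation on the docstring above, retrieving only nodes (no edges) with a view source. The space, external id, and version values are placeholders, and the `.nodes` attribute on the result is assumed from the retrieve examples:

```python
from cognite.client import CogniteClient
from cognite.client.data_classes.data_modeling import NodeId, ViewId

c = CogniteClient()
res = c.data_modeling.instances.retrieve(
    nodes=NodeId("mySpace", "myNode"),
    sources=ViewId("mySpace", "myViewExternalId", "myViewVersion"),
)
nodes = res.nodes  # only nodes were requested; no edges to unpack
```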
sources=("myspace", "myView")) """ identifiers = self._load_node_and_edge_ids(nodes, edges) other_params = self._create_other_params( diff --git a/cognite/client/_api/datapoints.py b/cognite/client/_api/datapoints.py index 10b79e3177..0b934e44d6 100644 --- a/cognite/client/_api/datapoints.py +++ b/cognite/client/_api/datapoints.py @@ -584,8 +584,10 @@ def retrieve( In order to retrieve millions of datapoints as efficiently as possible, here are a few guidelines: 1. For best speed, and significantly lower memory usage, consider using ``retrieve_arrays(...)`` which uses ``numpy.ndarrays`` for data storage. - 2. Only unlimited queries with (``limit=None``) are fetched in parallel so specifying a large finite ``limit`` like 1 million, comes with severe performance penalty as data is fetched serially. - 3. Try to avoid specifying `start` and `end` to be very far from the actual data: If you have data from 2000 to 2015, don't set start=0 (1970). + 2. Unlimited queries (``limit=None``) are most performant as they are always fetched in parallel, for any number of requested time series. + 3. Limited queries, (e.g. ``limit=200_000``) are much less performant, at least for large limits, as each individual time series is fetched serially + (we can't predict where on the timeline the datapoints lie). Thus parallelisation is only used when asking for multiple "limited" time series. + 4. Try to avoid specifying `start` and `end` to be very far from the actual data: If you have data from 2000 to 2015, don't use start=0 (1970). Args: id (None | int | dict[str, Any] | Sequence[int | dict[str, Any]]): Id, dict (with id) or (mixed) sequence of these. See examples below. diff --git a/cognite/client/_api/datapoints_subscriptions.py b/cognite/client/_api/datapoints_subscriptions.py index 04b1a3c00e..4eb602e6ff 100644 --- a/cognite/client/_api/datapoints_subscriptions.py +++ b/cognite/client/_api/datapoints_subscriptions.py @@ -57,13 +57,16 @@ def create(self, subscription: DataPointSubscriptionCreate) -> DatapointSubscrip >>> from cognite.client import CogniteClient >>> from cognite.client.data_classes import DataPointSubscriptionCreate - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes import filters as flt >>> from cognite.client.data_classes.datapoints_subscriptions import DatapointSubscriptionFilterProperties >>> c = CogniteClient() - >>> f = filters - >>> p = DatapointSubscriptionFilterProperties - >>> numeric_timeseries = f.Equals(p.is_string, False) - >>> sub = DataPointSubscriptionCreate("mySubscription", partition_count=1, filter=numeric_timeseries, name="My subscription for Numeric time series") + >>> prop = DatapointSubscriptionFilterProperties.is_string + >>> numeric_timeseries = flt.Equals(prop, False) + >>> sub = DataPointSubscriptionCreate( + ... "mySubscription", + ... partition_count=1, + ... filter=numeric_timeseries, + ... 
name="My subscription for Numeric time series") >>> created = c.time_series.subscriptions.create(sub) """ self._warning.warn() diff --git a/cognite/client/_api/events.py b/cognite/client/_api/events.py index 0e11625bbd..3f932d9437 100644 --- a/cognite/client/_api/events.py +++ b/cognite/client/_api/events.py @@ -643,13 +643,12 @@ def filter( and sort by start time descending: >>> from cognite.client import CogniteClient - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes import filters as flt >>> c = CogniteClient() - >>> f = filters - >>> is_workorder = f.Prefix("external_id", "workorder") - >>> has_failure = f.Search("description", "failure") - >>> res = c.events.filter(filter=f.And(is_workorder, has_failure), - ... sort=("start_time", "desc")) + >>> is_workorder = flt.Prefix("external_id", "workorder") + >>> has_failure = flt.Search("description", "failure") + >>> res = c.events.filter( + ... filter=flt.And(is_workorder, has_failure), sort=("start_time", "desc")) Note that you can check the API documentation above to see which properties you can filter on with which filters. @@ -658,14 +657,14 @@ def filter( for filtering and sorting, you can also use the `EventProperty` and `SortableEventProperty` enums. >>> from cognite.client import CogniteClient - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes import filters as flt >>> from cognite.client.data_classes.events import EventProperty, SortableEventProperty >>> c = CogniteClient() - >>> f = filters - >>> is_workorder = f.Prefix(EventProperty.external_id, "workorder") - >>> has_failure = f.Search(EventProperty.description, "failure") - >>> res = c.events.filter(filter=f.And(is_workorder, has_failure), - ... sort=(SortableEventProperty.start_time, "desc")) + >>> is_workorder = flt.Prefix(EventProperty.external_id, "workorder") + >>> has_failure = flt.Search(EventProperty.description, "failure") + >>> res = c.events.filter( + ... filter=flt.And(is_workorder, has_failure), + ... sort=(SortableEventProperty.start_time, "desc")) """ self._validate_filter(filter) diff --git a/cognite/client/_api/sequences.py b/cognite/client/_api/sequences.py index a9d8319667..6d77db4b60 100644 --- a/cognite/client/_api/sequences.py +++ b/cognite/client/_api/sequences.py @@ -585,8 +585,9 @@ def update( >>> from cognite.client.data_classes import SequenceUpdate, SequenceColumn >>> c = CogniteClient() >>> - >>> column_def = [SequenceColumn(value_type ="String",external_id="user", description ="some description"), - ... SequenceColumn(value_type="Double", external_id="amount")] + >>> column_def = [ + ... SequenceColumn(value_type ="String",external_id="user", description ="some description"), + ... 
SequenceColumn(value_type="Double", external_id="amount")] >>> my_update = SequenceUpdate(id=1).columns.add(column_def) >>> res = c.sequences.update(my_update) @@ -732,12 +733,11 @@ def filter( return them sorted by created time: >>> from cognite.client import CogniteClient - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes import filters as flt >>> c = CogniteClient() - >>> f = filters - >>> is_asset = f.Equals("asset_id", 123) - >>> is_efficiency = f.Equals(["metadata", "type"], "efficiency") - >>> res = c.time_series.filter(filter=f.And(is_asset, is_efficiency), sort="created_time") + >>> asset_filter = flt.Equals("asset_id", 123) + >>> is_efficiency = flt.Equals(["metadata", "type"], "efficiency") + >>> res = c.time_series.filter(filter=flt.And(asset_filter, is_efficiency), sort="created_time") Note that you can check the API documentation above to see which properties you can filter on with which filters. @@ -746,14 +746,14 @@ def filter( for filtering and sorting, you can also use the `SequenceProperty` and `SortableSequenceProperty` enums. >>> from cognite.client import CogniteClient - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes import filters as flt >>> from cognite.client.data_classes.sequences import SequenceProperty, SortableSequenceProperty >>> c = CogniteClient() - >>> f = filters - >>> is_asset = f.Equals(SequenceProperty.asset_id, 123) - >>> is_efficiency = f.Equals(SequenceProperty.metadata_key("type"), "efficiency") - >>> res = c.time_series.filter(filter=f.And(is_asset, is_efficiency), - ... sort=SortableSequenceProperty.created_time) + >>> asset_filter = flt.Equals(SequenceProperty.asset_id, 123) + >>> is_efficiency = flt.Equals(SequenceProperty.metadata_key("type"), "efficiency") + >>> res = c.time_series.filter( + ... filter=flt.And(asset_filter, is_efficiency), + ... sort=SortableSequenceProperty.created_time) """ self._validate_filter(filter) diff --git a/cognite/client/_api/time_series.py b/cognite/client/_api/time_series.py index 093239d11a..5bbcb0f395 100644 --- a/cognite/client/_api/time_series.py +++ b/cognite/client/_api/time_series.py @@ -728,10 +728,9 @@ def filter( Find all numeric time series and return them sorted by external id: >>> from cognite.client import CogniteClient - >>> from cognite.client.data_classes import filters + >>> from cognite.client.data_classes.filters import Equals >>> c = CogniteClient() - >>> f = filters - >>> is_numeric = f.Equals("is_string", False) + >>> is_numeric = Equals("is_string", False) >>> res = c.time_series.filter(filter=is_numeric, sort="external_id") Note that you can check the API documentation above to see which properties you can filter on @@ -741,11 +740,10 @@ def filter( for filtering and sorting, you can also use the `TimeSeriesProperty` and `SortableTimeSeriesProperty` enums. 
diff --git a/cognite/client/_api/time_series.py b/cognite/client/_api/time_series.py
index 093239d11a..5bbcb0f395 100644
--- a/cognite/client/_api/time_series.py
+++ b/cognite/client/_api/time_series.py
@@ -728,10 +728,9 @@ def filter(
             Find all numeric time series and return them sorted by external id:

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes.filters import Equals
                 >>> c = CogniteClient()
-                >>> f = filters
-                >>> is_numeric = f.Equals("is_string", False)
+                >>> is_numeric = Equals("is_string", False)
                 >>> res = c.time_series.filter(filter=is_numeric, sort="external_id")

             Note that you can check the API documentation above to see which properties you can filter on
@@ -741,11 +740,10 @@
             for filtering and sorting, you can also use the `TimeSeriesProperty` and `SortableTimeSeriesProperty` enums.

                 >>> from cognite.client import CogniteClient
-                >>> from cognite.client.data_classes import filters
+                >>> from cognite.client.data_classes.filters import Equals
                 >>> from cognite.client.data_classes.time_series import TimeSeriesProperty, SortableTimeSeriesProperty
                 >>> c = CogniteClient()
-                >>> f = filters
-                >>> is_numeric = f.Equals(TimeSeriesProperty.is_string, False)
+                >>> is_numeric = Equals(TimeSeriesProperty.is_string, False)
                 >>> res = c.time_series.filter(filter=is_numeric, sort=SortableTimeSeriesProperty.external_id)
         """
         self._validate_filter(filter)
diff --git a/cognite/client/data_classes/workflows.py b/cognite/client/data_classes/workflows.py
index ce592e88c2..d44ee69e74 100644
--- a/cognite/client/data_classes/workflows.py
+++ b/cognite/client/data_classes/workflows.py
@@ -152,7 +152,7 @@ class FunctionTaskParameters(WorkflowTaskParameters):
         ...         "workflow_data": "${workflow.input}",
         ...         "task1_input": "${task1.input}",
         ...         "task1_output": "${task1.output}"
-        ...         },
+        ...     },
         ...     ),
         ... )
diff --git a/docs/source/_static/empty b/docs/source/_static/empty
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/source/_templates/empty b/docs/source/_templates/empty
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 82970cf978..3ee27b6a5a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -33,7 +33,7 @@
 autosectionlabel_prefix_document = True

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = []

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
@@ -93,7 +93,7 @@
 # Add any paths that contain custom _static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin _static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = []

 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.