diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1cd6c16..e7af6ea 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -49,3 +49,27 @@ jobs:
           name: Release ${{ steps.tag_version.outputs.new_tag }}
           body: ${{ steps.tag_version.outputs.changelog }}
           generateReleaseNotes: true
+  mkdocs:
+    runs-on: ubuntu-22.04
+    needs: release
+    permissions:
+      contents: write
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.12"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v4
+        with:
+          python-version: "3.12"
+      - name: Install dependencies
+        run: uv pip install --system '.[docs]'
+      - name: Build mkdocs site
+        run: mkdocs build --clean
+      - name: Deploy to GitHub Pages
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./site
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 222c3fd..7264b7f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,6 +7,8 @@ repos:
       - id: check-ast
       - id: check-toml
       - id: check-yaml
+        args:
+          - --unsafe
       #- id: check-json
       - id: check-case-conflict
       - id: check-merge-conflict
diff --git a/README.md b/README.md
index c583ed2..57a2fe3 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,6 @@ from focoos import Focoos
 
 focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
 
 model = focoos.get_remote_model("focoos_object365")
-model.deploy()
 detections = model.infer("./image.jpg", threshold=0.4)
 ```
diff --git a/docs/api/focoos.md b/docs/api/focoos.md
new file mode 100644
index 0000000..2166ffc
--- /dev/null
+++ b/docs/api/focoos.md
@@ -0,0 +1 @@
+::: focoos.focoos
diff --git a/docs/api/local_model.md b/docs/api/local_model.md
new file mode 100644
index 0000000..3959422
--- /dev/null
+++ b/docs/api/local_model.md
@@ -0,0 +1 @@
+::: focoos.local_model
diff --git a/docs/api/remote_model.md b/docs/api/remote_model.md
new file mode 100644
index 0000000..475ab1e
--- /dev/null
+++ b/docs/api/remote_model.md
@@ -0,0 +1 @@
+::: focoos.remote_model
diff --git a/docs/api/runtime.md b/docs/api/runtime.md
new file mode 100644
index 0000000..1c315e5
--- /dev/null
+++ b/docs/api/runtime.md
@@ -0,0 +1 @@
+::: focoos.runtime
diff --git a/docs/assets/favicon.svg b/docs/assets/favicon.svg
new file mode 100644
index 0000000..803222f
--- /dev/null
+++ b/docs/assets/favicon.svg
@@ -0,0 +1,3 @@
+<!-- SVG markup omitted in this patch excerpt -->
diff --git a/docs/assets/logo.svg b/docs/assets/logo.svg
new file mode 100644
index 0000000..bfaf9f6
--- /dev/null
+++ b/docs/assets/logo.svg
@@ -0,0 +1,60 @@
+<!-- SVG markup omitted in this patch excerpt -->
diff --git a/docs/assets/stylesheets/custom.css b/docs/assets/stylesheets/custom.css
new file mode 100644
index 0000000..02249d9
--- /dev/null
+++ b/docs/assets/stylesheets/custom.css
@@ -0,0 +1,22 @@
+/* Light mode (default scheme) */
+[data-md-color-scheme="default"] {
+  --md-primary-fg-color: #63dca7;
+  --md-primary-bg-color: #000000;
+  --md-accent-fg-color: #025ee6;
+  --md-typeset-a-color: #025ee6;
+}
+[data-md-color-scheme=slate][data-md-color-primary=indigo] {
+  --md-typeset-a-color: #63dca7;
+}
+/* Dark mode (slate scheme) */
+[data-md-color-scheme="slate"] {
+  --md-primary-fg-color: #025ee6;
+  --md-primary-bg-color: #000000;
+  --md-accent-fg-color: #63dca7;
+  --md-typeset-a-color: #63dca7;
+}
+
+/* Override the first child of md-header__topic */
+.md-header__topic:first-child {
+  font-weight: 350; /* adjust to a normal font weight, 400 or another value */
+}
diff --git a/docs/datasets.md b/docs/datasets.md
new file mode 100644
index 0000000..0f019ca
--- /dev/null
+++ b/docs/datasets.md
@@ -0,0 +1,28 @@
+# Datasets
+
+With the Focoos SDK, you can leverage a diverse collection of foundational datasets specifically tailored for computer vision tasks. These datasets, spanning tasks such as segmentation, detection, and instance segmentation, provide a strong foundation for building and optimizing models across a variety of domains.
+
+---
+
+Datasets:
+
+| Name                 | Task       | Description                                                   | Layout        |
+|----------------------|------------|---------------------------------------------------------------|---------------|
+| Aeroscapes           | semseg     | A drone dataset to recognize many classes!                    | supervisely   |
+| Blister              | instseg    | A dataset to find blisters                                    | roboflow_coco |
+| Boxes                | detection  | Finding different boxes on the conveyor belt                  | roboflow_coco |
+| Cable                | detection  | A dataset for detecting damage in cables (from Roboflow 100)  | roboflow_coco |
+| Circuit dataset      | detection  | A dataset with electronic circuits                            | roboflow_coco |
+| Concrete             | instseg    | A dataset to find defects in concrete                         | roboflow_coco |
+| Crack Segmentation   | instseg    | A dataset for segmenting cracks in buildings with 4k images.  | roboflow_coco |
+| Football-detection   | detection  | Football-detection by Roboflow                                | roboflow_coco |
+| Peanuts              | detection  | Finding molded or non-molded peanuts                          | roboflow_coco |
+| Strawberries         | instseg    | Finding defects on strawberries                               | roboflow_coco |
+| aquarium             | detection  | aquarium                                                      | roboflow_coco |
+| bottles              | detection  | bottles                                                       | roboflow_coco |
+| chess_pieces         | detection  | A chess detector dataset by Roboflow                          | roboflow_coco |
+| coco_2017_det        | detection  | COCO Detection                                                | catalog       |
+| halo                 | detection  | Halo FPS by Roboflow                                          | roboflow_coco |
+| lettuce              | detection  | A dataset to find lettuce                                     | roboflow_coco |
+| safety               | detection  | A safety dataset from Roboflow Universe                       | roboflow_coco |
+| screw                | detection  | Screw by Roboflow                                             | roboflow_coco |
diff --git a/docs/development/changelog.md b/docs/development/changelog.md
new file mode 100644
index 0000000..4e11c72
--- /dev/null
+++ b/docs/development/changelog.md
@@ -0,0 +1 @@
+{% include-markdown "helpers/wip.md" %}
diff --git a/docs/development/code_of_conduct.md b/docs/development/code_of_conduct.md
new file mode 100644
index 0000000..4e11c72
--- /dev/null
+++ b/docs/development/code_of_conduct.md
@@ -0,0 +1 @@
+{% include-markdown "helpers/wip.md" %}
diff --git a/docs/development/contributing.md b/docs/development/contributing.md
new file mode 100644
index 0000000..4e11c72
--- /dev/null
+++ b/docs/development/contributing.md
@@ -0,0 +1 @@
+{% include-markdown "helpers/wip.md" %}
diff --git a/docs/getting_started/installation.md b/docs/getting_started/installation.md
new file mode 100644
index 0000000..03d9e8e
--- /dev/null
+++ b/docs/getting_started/installation.md
@@ -0,0 +1,58 @@
+# Installation
+
+The Focoos SDK provides flexibility for installation based on the execution environment you plan to use. The package supports `CPU`, `NVIDIA GPU`, and `NVIDIA GPU with TensorRT` environments. Please note that only one execution environment should be selected during installation.
+
+## Requirements
+
+For **local inference**, ensure that you have CUDA 12 and cuDNN 9 installed, as they are required for onnxruntime version 1.20.1.
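+
+As a quick sanity check (an illustrative snippet, not part of the installation steps themselves), you can verify which execution providers your onnxruntime build exposes:
+
+```python
+import onnxruntime as ort
+
+# A correctly configured CUDA 12 + cuDNN 9 setup lists "CUDAExecutionProvider";
+# a TensorRT build also lists "TensorrtExecutionProvider".
+print(ort.get_available_providers())
+```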
+
+To install cuDNN 9:
+
+```bash linenums="0"
+apt-get -y install cudnn9-cuda-12
+```
+
+To perform inference using TensorRT, ensure you have TensorRT version 10.5 installed.
+
+## Installation Options
+
+* CPU Environment
+
+If you plan to run the SDK in a CPU-only environment:
+
+```bash linenums="0"
+pip install 'focoos[cpu] @ git+https://github.com/FocoosAI/focoos.git'
+```
+
+* NVIDIA GPU Environment
+
+For execution using NVIDIA GPUs (with ONNX Runtime GPU support):
+
+```bash linenums="0"
+pip install 'focoos[gpu] @ git+https://github.com/FocoosAI/focoos.git'
+```
+
+* NVIDIA GPU with TensorRT
+
+For optimized execution using NVIDIA GPUs with TensorRT:
+
+```bash linenums="0"
+pip install 'focoos[tensorrt] @ git+https://github.com/FocoosAI/focoos.git'
+```
+
+!!! note
+    🛠️ **Installation Tip:** If you want to install a specific version, for example `v0.1.3`, use:
+    ```bash
+    pip install 'focoos[tensorrt] @ git+https://github.com/FocoosAI/focoos.git@v0.1.3'
+    ```
+    📋 **Check Versions:** Visit [https://github.com/FocoosAI/focoos/tags](https://github.com/FocoosAI/focoos/tags) for available versions.
diff --git a/docs/getting_started/introduction.md b/docs/getting_started/introduction.md
new file mode 100644
index 0000000..008d674
--- /dev/null
+++ b/docs/getting_started/introduction.md
@@ -0,0 +1,7 @@
+# Focoos Python SDK 📦
+
+Unlock the full potential of Focoos AI with the Focoos Python SDK! 🚀 This powerful SDK gives you seamless access to our cutting-edge computer vision models and tools, allowing you to effortlessly interact with the Focoos API. With just a few lines of code, you can easily **select, customize, test, and deploy** pre-trained models tailored to your specific needs. Whether you're deploying in the cloud or on edge devices, the Focoos Python SDK integrates smoothly into your workflow, speeding up your development process.
+
+Ready to dive in? Get started with the setup in just a few simple steps!
+
+[🚀 Install the Focoos Python SDK](../installation)
diff --git a/docs/getting_started/quickstart.md b/docs/getting_started/quickstart.md
new file mode 100644
index 0000000..11ab3a2
--- /dev/null
+++ b/docs/getting_started/quickstart.md
@@ -0,0 +1,28 @@
+# Quickstart 🚀
+
+Getting started with Focoos AI has never been easier! In just a few steps, you can quickly set up remote inference using our built-in models. Here's a simple example of how to perform object detection with the **focoos_object365** model:
+
+## Step 1: Install the SDK
+
+First, make sure you've installed the Focoos Python SDK by following the [installation guide](../installation).
+
+## Step 2: Set Up Remote Inference
+
+With the SDK installed, you can start using the Focoos API to run inference remotely. Here's a basic code snippet to detect objects in an image using a pre-trained model:
+
+```python
+import os
+from focoos import Focoos
+
+# Initialize the Focoos client with your API key
+focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+
+# Get the remote model (focoos_object365) from the Focoos API
+model = focoos.get_remote_model("focoos_object365")
+
+# Run inference on an image
+detections = model.infer("./image.jpg", threshold=0.4)
+
+# Output the detections
+print(detections)
+```
diff --git a/docs/helpers/wip.md b/docs/helpers/wip.md
new file mode 100644
index 0000000..01e3a85
--- /dev/null
+++ b/docs/helpers/wip.md
@@ -0,0 +1,7 @@
+🚧 **Work in Progress** 🚧
+
+This page is currently being developed and may not be complete.
+
+---
+
+Feel free to contribute to this page! If you have suggestions or would like to help improve it, please [contact us](mailto:info@focoos.ai).
diff --git a/docs/how_to/cloud_training.md b/docs/how_to/cloud_training.md
new file mode 100644
index 0000000..fe2a153
--- /dev/null
+++ b/docs/how_to/cloud_training.md
@@ -0,0 +1,77 @@
+# Cloud Training
+
+This section covers the steps to train a model in the cloud using the `focoos` library. The following example demonstrates how to interact with the Focoos API to manage models, datasets, and training jobs.
+
+---
+
+## Listing Available Datasets
+
+Before training a model, you can list all available shared datasets:
+
+```python
+from pprint import pprint
+import os
+from focoos import Focoos
+
+focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+
+datasets = focoos.list_shared_datasets()
+pprint(datasets)
+```
+
+## Initiating a Cloud Training Job
+
+To start training, configure the model, dataset, and training parameters as shown below:
+
+```python
+from focoos.ports import Hyperparameters, TrainInstance
+
+model = focoos.get_remote_model("<model-ref>")
+
+res = model.train(
+    anyma_version="0.11.1",
+    dataset_ref="<dataset-ref>",
+    instance_type=TrainInstance.ML_G4DN_XLARGE,
+    volume_size=50,
+    max_runtime_in_seconds=36000,
+    hyperparameters=Hyperparameters(
+        learning_rate=0.0001,
+        batch_size=16,
+        max_iters=1500,
+        eval_period=100,
+        resolution=640,
+    ),  # type: ignore
+)
+pprint(res)
+```
+
+## Monitoring Training Progress
+
+Once the training job is initiated, monitor its progress by polling the training status. Use the following code:
+
+```python
+import time
+from pprint import pprint
+from focoos.utils.logger import get_logger
+
+completed_status = ["Completed", "Failed"]
+logger = get_logger(__name__)
+
+model = focoos.get_remote_model("<model-ref>")
+status = model.train_status()
+
+while status["main_status"] not in completed_status:
+    status = model.train_status()
+    logger.info(f"Training status: {status['main_status']}")
+    pprint(f"Training progress: {status['status_transitions']}")
+    time.sleep(30)
+```
+
+## Retrieving Training Logs
+
+After the training process is complete, retrieve the logs for detailed insights:
+
+```python
+logs = model.train_logs()
+pprint(logs)
+```
diff --git a/docs/how_to/inference.md b/docs/how_to/inference.md
new file mode 100644
index 0000000..806b0ec
--- /dev/null
+++ b/docs/how_to/inference.md
@@ -0,0 +1,42 @@
+# Inference
+
+This section covers how to perform inference using the `focoos` library. You can deploy models to the cloud for predictions, integrate with Gradio for interactive demos, or run inference locally.
+
+---
+
+## 🤖 Cloud Inference
+
+```python
+import os
+from focoos import Focoos
+
+focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+
+model = focoos.get_remote_model("focoos_object365")
+detections = model.infer("./image.jpg", threshold=0.4)
+```
+
+## 🤖 Cloud Inference with Gradio
+
+Set the `FOCOOS_API_KEY_GRADIO` environment variable to your Focoos API key, then install the Gradio extras and launch the demo app:
+
+```bash linenums="0"
+pip install '.[gradio]'
+```
+
+```bash linenums="0"
+python gradio/app.py
+```
+
+## 🤖 Local Inference
+
+```python
+import os
+from focoos import Focoos
+
+focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+
+model = focoos.get_local_model("focoos_object365")
+
+detections = model.infer("./image.jpg", threshold=0.4)
+```
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..5c7c77c
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,64 @@
+# Welcome to Focoos AI 🔥
+
+Focoos AI provides an advanced development platform designed to empower developers and businesses with efficient, customizable computer vision solutions. Whether you're working with data from cloud infrastructures or deploying on edge devices, Focoos AI enables you to select, fine-tune, and deploy state-of-the-art models optimized for your unique needs.
+
+## What We Offer 🎯
+
+### AI-Ready Models Platform for Computer Vision Applications 🤖
+
+Focoos AI offers a versatile platform for developing computer vision solutions. Our platform includes a suite of services to support the end-to-end development process:
+
+- **Ready-to-use models**: Choose from a variety of pre-trained models, optimized for different data, applications, and hardware.
+- **Customization**: Tailor models to your specific needs by selecting relevant classes and fine-tuning them on your own dataset.
+- **Testing and Validation**: Verify model accuracy and efficiency using your own data samples, ensuring the model meets your requirements before deployment.
+
+### Key Features 🔑
+
+1. **Select Ready-to-use Models** 🧩
+   Get started quickly by selecting one of our efficient, pre-trained models that best suits your data and application needs.
+
+2. **Personalize Your Model** ✨
+   Customize the selected model for higher accuracy through fine-tuning. Adapt the model to your specific use case by training it on your own dataset and selecting useful classes.
+
+3. **Test and Validate** 🧪
+   Upload your data sample to test the model's accuracy and efficiency. Iterate the process to ensure the model performs to your expectations.
+
+4. **Cloud Deployment** ☁️
+   Deploy the model on your preferred cloud infrastructure, whether it's your own private cloud or a public cloud service. Your data stays private, as it remains within your servers.
+
+5. **Edge Deployment** 🖥️
+   Deploy the model on edge devices. Download the Focoos Engine to run the model locally, without sending any data over the network, ensuring full privacy.
+
+### Why Choose Focoos AI? 🤩
+
+Using Focoos AI helps you save both time and money while delivering high-performance AI models:
+
+- **80% Faster Development** ⏳: Save significant development time compared to traditional methods.
+- **+5% Model Accuracy** 🎯: Achieve some of the most accurate models in the market, as demonstrated by our scientific benchmarks.
+- **Up to 20x Faster Models** ⚡: Run real-time data analysis with some of the fastest models available today.
+
+### Pre-Trained Models with Minimum Training Data 📊
+
+Our pre-trained models reduce the need for large datasets, making it easier to deploy computer vision solutions.
+Here's how Focoos AI helps you minimize your resources:
+
+- **80% Less Training Data** 📉: Leverage pre-trained models that are ready to tackle a variety of use cases.
+- **50% Lower Infrastructure Costs** 💡: Use less expensive hardware and reduce energy consumption.
+- **75% Reduction in CO2 Emissions** 🌱: Deploy energy-efficient models that help you reduce your carbon footprint.
+
+### Proven Efficiency and Accuracy 🔍
+
+Focoos AI models outperform other solutions in terms of both accuracy and efficiency. Our technical report highlights how our models lead in academic benchmarks across multiple domains. Contact us to learn more about the scientific benchmarks that set Focoos AI apart.
+
+### Pricing Model 💵
+
+We offer a flexible pricing model based on your deployment preferences:
+
+- **Public Cloud** 🌐: Pay for model usage when deployed on public cloud providers.
+- **Private Infrastructure** 🏢: Pay for usage when deploying on your own infrastructure.
+
+Contact us for a tailored quote based on your specific use case.
+
+---
+
+By choosing Focoos AI, you can save time, reduce costs, and achieve superior model performance, all while ensuring the privacy and efficiency of your deployments. Ready to get started? Reach out to us today to explore how Focoos AI can power your computer vision projects. 🚀
diff --git a/docs/models.md b/docs/models.md
new file mode 100644
index 0000000..a59f1b3
--- /dev/null
+++ b/docs/models.md
@@ -0,0 +1,21 @@
+# Focoos Foundational Models
+
+With the Focoos SDK, you can take advantage of a collection of foundational models that are optimized for a range of computer vision tasks. These pre-trained models, covering detection and semantic segmentation across various domains, provide an excellent starting point for your specific use case. Whether you need to fine-tune for custom requirements or adapt them to your application, these models offer a solid foundation to accelerate your development process.
+
+---
+
+Models:
+
+| Model Name          | Task                  | Metrics | Domain                          |
+| ------------------- | --------------------- | ------- | ------------------------------- |
+| focoos_object365    | Detection             | -       | Common Objects, 365 classes     |
+| focoos_rtdetr       | Detection             | -       | Common Objects, 80 classes      |
+| focoos_cts_medium   | Semantic Segmentation | -       | Autonomous driving, 30 classes  |
+| focoos_cts_large    | Semantic Segmentation | -       | Autonomous driving, 30 classes  |
+| focoos_ade_nano     | Semantic Segmentation | -       | Common Scenes, 150 classes      |
+| focoos_ade_small    | Semantic Segmentation | -       | Common Scenes, 150 classes      |
+| focoos_ade_medium   | Semantic Segmentation | -       | Common Scenes, 150 classes      |
+| focoos_ade_large    | Semantic Segmentation | -       | Common Scenes, 150 classes      |
+| focoos_aeroscapes   | Semantic Segmentation | -       | Drone Aerial Scenes, 11 classes |
+| focoos_isaid_nano   | Semantic Segmentation | -       | Satellite Imagery, 15 classes   |
+| focoos_isaid_medium | Semantic Segmentation | -       | Satellite Imagery, 15 classes   |
diff --git a/focoos/focoos.py b/focoos/focoos.py
index 7b8d68d..166cdb2 100644
--- a/focoos/focoos.py
+++ b/focoos/focoos.py
@@ -1,3 +1,24 @@
+"""
+Focoos Module
+
+This module provides a Python interface for interacting with Focoos APIs,
+allowing users to manage machine learning models and datasets in the Focoos ecosystem.
+The module supports operations such as retrieving model metadata, downloading models,
+and listing shared datasets.
+
+Classes:
+    Focoos: Main class to interface with Focoos APIs.
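+
+Example (an illustrative sketch; assumes FOCOOS_API_KEY is set in the environment):
+    >>> import os
+    >>> from focoos import Focoos
+    >>> focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+    >>> models = focoos.list_models()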
+
+Exceptions:
+    ValueError: Raised for invalid API responses or missing parameters.
+"""
+
 import os
 from typing import Optional, Union
 
@@ -14,11 +29,35 @@
 
 
 class Focoos:
+    """
+    Main class to interface with Focoos APIs.
+
+    This class provides methods to interact with Focoos-hosted models and datasets.
+    It supports functionalities such as listing models, retrieving model metadata,
+    downloading models, and creating new models.
+
+    Attributes:
+        api_key (str): The API key for authentication.
+        http_client (HttpClient): HTTP client for making API requests.
+        user_info (dict): Information about the currently authenticated user.
+        cache_dir (str): Local directory for caching downloaded models.
+    """
+
     def __init__(
         self,
         api_key: str = FOCOOS_CONFIG.focoos_api_key,  # type: ignore
        host_url: str = FOCOOS_CONFIG.default_host_url,
    ):
+        """
+        Initializes the Focoos API client.
+
+        Args:
+            api_key (str): API key for authentication. Defaults to value from configuration.
+            host_url (str): Base URL for Focoos API. Defaults to value from configuration.
+
+        Raises:
+            ValueError: If the API key is not provided or user info retrieval fails.
+        """
         self.api_key = api_key
         if not self.api_key:
             logger.error("API key is required 🤖")
@@ -32,6 +71,15 @@ def __init__(
         )
 
     def _get_user_info(self):
+        """
+        Retrieves information about the authenticated user.
+
+        Returns:
+            dict: Information about the user (e.g., email).
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.get("user/")
         if res.status_code == 200:
             return res.json()
@@ -40,6 +88,18 @@ def _get_user_info(self):
         raise ValueError(f"Failed to get user info: {res.status_code} {res.text}")
 
     def get_model_info(self, model_name: str) -> ModelMetadata:
+        """
+        Retrieves metadata for a specific model.
+
+        Args:
+            model_name (str): Name of the model.
+
+        Returns:
+            ModelMetadata: Metadata of the specified model.
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.get(f"models/{model_name}")
         if res.status_code == 200:
             return ModelMetadata.from_json(res.json())
@@ -48,6 +108,15 @@ def get_model_info(self, model_name: str) -> ModelMetadata:
         raise ValueError(f"Failed to get model info: {res.status_code} {res.text}")
 
     def list_models(self) -> list[ModelPreview]:
+        """
+        Lists all available models.
+
+        Returns:
+            list[ModelPreview]: List of model previews.
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.get(f"models/")
         if res.status_code == 200:
             return [ModelPreview.from_json(r) for r in res.json()]
@@ -56,6 +125,15 @@ def list_models(self) -> list[ModelPreview]:
         raise ValueError(f"Failed to list models: {res.status_code} {res.text}")
 
     def list_focoos_models(self) -> list[ModelPreview]:
+        """
+        Lists models specific to Focoos.
+
+        Returns:
+            list[ModelPreview]: List of Focoos models.
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.get(f"models/focoos-models")
         if res.status_code == 200:
             return [ModelPreview.from_json(r) for r in res.json()]
@@ -70,17 +148,55 @@ def get_local_model(
         model_ref: str,
         runtime_type: RuntimeTypes = FOCOOS_CONFIG.runtime_type,
     ) -> LocalModel:
+        """
+        Retrieves a locally cached model or downloads it if not available.
+
+        Args:
+            model_ref (str): Reference name of the model.
+            runtime_type (RuntimeTypes): Runtime type for the model. Defaults to configuration value.
+
+        Returns:
+            LocalModel: The local model instance.
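+
+        Example (illustrative; "focoos_object365" is one of the built-in models):
+            >>> focoos = Focoos(api_key=os.getenv("FOCOOS_API_KEY"))
+            >>> model = focoos.get_local_model("focoos_object365")
+            >>> detections, _ = model.infer("./image.jpg", threshold=0.4)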
+        """
         model_dir = os.path.join(self.cache_dir, model_ref)
         if not os.path.exists(os.path.join(model_dir, "model.onnx")):
             self._download_model(model_ref)
         return LocalModel(model_dir, runtime_type)
 
     def get_remote_model(self, model_ref: str) -> RemoteModel:
+        """
+        Retrieves a remote model instance.
+
+        Args:
+            model_ref (str): Reference name of the model.
+
+        Returns:
+            RemoteModel: The remote model instance.
+        """
         return RemoteModel(model_ref, self.http_client)
 
     def new_model(
         self, name: str, focoos_model: str, description: str
     ) -> Optional[RemoteModel]:
+        """
+        Creates a new model in the Focoos system.
+
+        Args:
+            name (str): Name of the new model.
+            focoos_model (str): Reference to the base Focoos model.
+            description (str): Description of the new model.
+
+        Returns:
+            Optional[RemoteModel]: The created model instance, or None if creation fails.
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.post(
             f"models/",
             data={
@@ -99,6 +210,15 @@ def new_model(
         return None
 
     def list_shared_datasets(self) -> list[DatasetMetadata]:
+        """
+        Lists datasets shared with the user.
+
+        Returns:
+            list[DatasetMetadata]: List of shared datasets.
+
+        Raises:
+            ValueError: If the API request fails.
+        """
         res = self.http_client.get(f"datasets/shared")
         if res.status_code == 200:
             return [DatasetMetadata.from_json(dataset) for dataset in res.json()]
@@ -107,6 +227,18 @@ def list_shared_datasets(self) -> list[DatasetMetadata]:
         raise ValueError(f"Failed to list datasets: {res.status_code} {res.text}")
 
     def _download_model(self, model_ref: str) -> str:
+        """
+        Downloads a model from the Focoos API.
+
+        Args:
+            model_ref (str): Reference name of the model.
+
+        Returns:
+            str: Path to the downloaded model.
+
+        Raises:
+            ValueError: If the API request fails or the download fails.
+        """
         model_dir = os.path.join(self.cache_dir, model_ref)
         model_path = os.path.join(model_dir, "model.onnx")
         metadata_path = os.path.join(model_dir, "focoos_metadata.json")
@@ -157,6 +289,15 @@ def _download_model(self, model_ref: str) -> str:
         raise ValueError(f"Failed to download model: {res.status_code} {res.text}")
 
     def get_dataset_by_name(self, name: str) -> Optional[DatasetMetadata]:
+        """
+        Retrieves a dataset by its name.
+
+        Args:
+            name (str): Name of the dataset.
+
+        Returns:
+            Optional[DatasetMetadata]: The dataset metadata if found, or None otherwise.
+        """
         datasets = self.list_shared_datasets()
         for dataset in datasets:
             if name.lower() == dataset.name.lower():
@@ -165,6 +306,16 @@ def get_dataset_by_name(self, name: str) -> Optional[DatasetMetadata]:
     def get_model_by_name(
         self, name: str, remote=True
     ) -> Optional[Union[RemoteModel, LocalModel]]:
+        """
+        Retrieves a model by its name.
+
+        Args:
+            name (str): Name of the model.
+            remote (bool): If True, retrieve as a RemoteModel. Otherwise, as a LocalModel. Defaults to True.
+
+        Returns:
+            Optional[Union[RemoteModel, LocalModel]]: The model instance if found, or None otherwise.
+        """
         models = self.list_models()
         for model in models:
             if name.lower() == model.name.lower():
diff --git a/focoos/local_model.py b/focoos/local_model.py
index b3a61a8..4643db8 100644
--- a/focoos/local_model.py
+++ b/focoos/local_model.py
@@ -1,3 +1,23 @@
+"""
+LocalModel Module
+
+This module provides the `LocalModel` class that allows loading, inference,
+and benchmark testing of models in a local environment. It supports detection
+and segmentation tasks, and utilizes ONNXRuntime for model execution.
+
+Classes:
+    LocalModel: A class for managing and interacting with local models.
+
+Methods:
+    __init__: Initializes the LocalModel instance, loading the model, metadata,
+        and setting up the runtime.
+    _read_metadata: Reads the model metadata from a JSON file.
+    _annotate: Annotates the input image with detection or segmentation results.
+    infer: Runs inference on an input image, with optional annotation.
+    benchmark: Benchmarks the model's inference performance over a specified
+        number of iterations and input size.
+"""
+
 import os
 from pathlib import Path
 from time import perf_counter
@@ -32,6 +52,20 @@ def __init__(
         model_dir: Union[str, Path],
         runtime_type: RuntimeTypes = FOCOOS_CONFIG.runtime_type,
     ):
+        """
+        Initialize the LocalModel instance.
+
+        Args:
+            model_dir (Union[str, Path]): Path to the model directory.
+            runtime_type (RuntimeTypes, optional): Type of runtime to use. Defaults to
+                FOCOOS_CONFIG.runtime_type.
+
+        Raises:
+            FileNotFoundError: If the specified model directory does not exist.
+
+        Initializes the model, loads metadata, and prepares the runtime environment
+        for inference.
+        """
         logger.debug(f"Runtime type: {runtime_type}, Loading model from {model_dir},")
         if not os.path.exists(model_dir):
             raise FileNotFoundError(f"Model directory not found: {model_dir}")
@@ -49,10 +83,29 @@ def __init__(
         )
 
     def _read_metadata(self) -> ModelMetadata:
+        """
+        Reads the model metadata from a JSON file.
+
+        Returns:
+            ModelMetadata: Metadata for the model.
+
+        Raises:
+            FileNotFoundError: If the metadata file does not exist in the model directory.
+        """
         metadata_path = os.path.join(self.model_dir, "focoos_metadata.json")
         return ModelMetadata.from_json(metadata_path)
 
     def _annotate(self, im: np.ndarray, detections: Detections) -> np.ndarray:
+        """
+        Annotates the input image with detection or segmentation results.
+
+        Args:
+            im (np.ndarray): The input image to annotate.
+            detections (Detections): Detected objects or segmented regions.
+
+        Returns:
+            np.ndarray: The annotated image with bounding boxes or masks.
+        """
         classes = self.metadata.classes
         if classes is not None:
             labels = [
@@ -87,6 +140,20 @@ def infer(
         threshold: float = 0.5,
         annotate: bool = False,
     ) -> Tuple[FocoosDetections, Optional[np.ndarray]]:
+        """
+        Run inference on an input image and optionally annotate the results.
+
+        Args:
+            image (Union[bytes, str, Path, np.ndarray, Image.Image]): The input image to infer on.
+            threshold (float, optional): The confidence threshold for detections. Defaults to 0.5.
+            annotate (bool, optional): Whether to annotate the image with detection results. Defaults to False.
+
+        Returns:
+            Tuple[FocoosDetections, Optional[np.ndarray]]: The detections from the inference and the annotated image (if applicable).
+
+        Raises:
+            ValueError: If the model is not deployed locally.
+        """
         if self.runtime is None:
             raise ValueError("Model is not deployed (locally)")
         resize = None  #!TODO check for segmentation
@@ -117,4 +184,17 @@ def infer(
         return out, im
 
     def benchmark(self, iterations: int, size: int) -> LatencyMetrics:
+        """
+        Benchmark the model's inference performance over multiple iterations.
+
+        Args:
+            iterations (int): Number of iterations to run for benchmarking.
+            size (int): The input size for each benchmark iteration.
+
+        Returns:
+            LatencyMetrics: Latency metrics including time taken for inference.
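+
+        Example (illustrative; assumes `model` is a loaded LocalModel):
+            >>> metrics = model.benchmark(iterations=20, size=640)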
+        """
         return self.runtime.benchmark(iterations, size)
diff --git a/focoos/remote_model.py b/focoos/remote_model.py
index ad5c9a7..06f7ee1 100644
--- a/focoos/remote_model.py
+++ b/focoos/remote_model.py
@@ -1,3 +1,29 @@
+"""
+RemoteModel Module
+
+This module provides a class to manage remote models in the Focoos ecosystem. It supports
+various functionalities including model training, deployment, inference, and monitoring.
+
+Classes:
+    RemoteModel: A class for interacting with remote models, managing their lifecycle,
+        and performing inference.
+
+Modules:
+    HttpClient: Handles HTTP requests.
+    logger: Logging utility.
+    BoxAnnotator, LabelAnnotator, MaskAnnotator: Annotation tools for visualizing
+        detections and segmentation tasks.
+    FocoosDet, FocoosDetections: Classes for representing and managing detections.
+    FocoosTask: Enum for defining supported tasks (e.g., DETECTION, SEMSEG).
+    Hyperparameters: Structure for training configuration parameters.
+    ModelMetadata: Contains metadata for the model.
+    ModelStatus: Enum for representing the current status of the model.
+    TrainInstance: Enum for defining available training instances.
+    image_loader: Utility function for loading images.
+    focoos_detections_to_supervision: Converter for Focoos detections to supervision format.
+"""
+
 import os
 import time
 from pathlib import Path
@@ -24,7 +50,30 @@
 
 
 class RemoteModel:
+    """
+    Represents a remote model in the Focoos platform.
+
+    Attributes:
+        model_ref (str): Reference ID for the model.
+        http_client (HttpClient): Client for making HTTP requests.
+        max_deploy_wait (int): Maximum wait time for model deployment.
+        metadata (ModelMetadata): Metadata of the model.
+        label_annotator (LabelAnnotator): Annotator for adding labels to images.
+        box_annotator (BoxAnnotator): Annotator for drawing bounding boxes.
+        mask_annotator (MaskAnnotator): Annotator for drawing masks on images.
+    """
+
     def __init__(self, model_ref: str, http_client: HttpClient):
+        """
+        Initialize the RemoteModel instance.
+
+        Args:
+            model_ref (str): Reference ID for the model.
+            http_client (HttpClient): HTTP client instance for communication.
+
+        Raises:
+            ValueError: If model metadata retrieval fails.
+        """
         self.model_ref = model_ref
         self.http_client = http_client
         self.max_deploy_wait = 10
@@ -38,6 +87,15 @@ def __init__(self, model_ref: str, http_client: HttpClient):
         )
 
     def get_info(self) -> ModelMetadata:
+        """
+        Retrieve model metadata.
+
+        Returns:
+            ModelMetadata: Metadata of the model.
+
+        Raises:
+            ValueError: If the request fails.
+        """
         res = self.http_client.get(f"models/{self.model_ref}")
         if res.status_code != 200:
             logger.error(f"Failed to get model info: {res.status_code} {res.text}")
@@ -53,7 +111,28 @@ def train(
         instance_type: TrainInstance = TrainInstance.ML_G4DN_XLARGE,
         volume_size: int = 50,
         max_runtime_in_seconds: int = 36000,
-    ):
+    ) -> dict | None:
+        """
+        Initiate the training of a remote model on the Focoos platform.
+
+        This method sends a request to the Focoos platform to start the training process for the model
+        referenced by `self.model_ref`. It requires a dataset reference and hyperparameters for training,
+        as well as optional configuration options for the instance type, volume size, and runtime.
+
+        Args:
+            dataset_ref (str): The reference ID of the dataset to be used for training.
+            hyperparameters (Hyperparameters): A structure containing the hyperparameters for the training process.
+            anyma_version (str, optional): The version of Anyma to use for training.
+                Defaults to "anyma-sagemaker-cu12-torch22-0111".
+            instance_type (TrainInstance, optional): The type of training instance to use. Defaults to TrainInstance.ML_G4DN_XLARGE.
+            volume_size (int, optional): The size of the disk volume (in GB) for the training instance. Defaults to 50.
+            max_runtime_in_seconds (int, optional): The maximum runtime for training in seconds. Defaults to 36000.
+
+        Returns:
+            Optional[dict]: The response from the training initiation request, or None if the request fails.
+        """
         res = self.http_client.post(
             f"models/{self.model_ref}/train",
             data={
@@ -65,23 +144,45 @@ def train(
                 "hyperparameters": hyperparameters.model_dump(),
             },
         )
-        if res.status_code == 200:
-            return res.json()
-        else:
+        if res.status_code != 200:
             logger.warning(f"Failed to train model: {res.status_code} {res.text}")
             return None
+        return res.json()
 
-    def train_status(self):
+    def train_status(self) -> dict:
+        """
+        Retrieve the current status of the model training.
+
+        Sends a request to check the training status of the model referenced by `self.model_ref`.
+
+        Returns:
+            dict: A dictionary containing the training status information.
+
+        Raises:
+            ValueError: If the request to get training status fails.
+        """
         res = self.http_client.get(f"models/{self.model_ref}/train/status")
-        if res.status_code == 200:
-            return res.json()
-        else:
+        if res.status_code != 200:
             logger.error(f"Failed to get train status: {res.status_code} {res.text}")
             raise ValueError(
                 f"Failed to get train status: {res.status_code} {res.text}"
             )
+        return res.json()
 
     def train_logs(self) -> list[str]:
+        """
+        Retrieve the training logs for the model.
+
+        This method sends a request to fetch the logs of the model's training process. If the request
+        is successful (status code 200), it returns the logs as a list of strings. If the request fails,
+        it logs a warning and returns an empty list.
+
+        Returns:
+            list[str]: A list of training logs as strings; empty if the request fails.
+        """
         res = self.http_client.get(f"models/{self.model_ref}/train/logs")
         if res.status_code == 200:
             return res.json()
@@ -90,6 +191,20 @@ def train_logs(self) -> list[str]:
         return []
 
     def _annotate(self, im: np.ndarray, detections: Detections) -> np.ndarray:
+        """
+        Annotate an image with detection results.
+
+        This method adds visual annotations to the provided image based on the model's detection results.
+        It handles different tasks (e.g., object detection, semantic segmentation, instance segmentation)
+        and uses the corresponding annotator (bounding box, label, or mask) to draw on the image.
+
+        Args:
+            im (np.ndarray): The image to be annotated, represented as a NumPy array.
+            detections (Detections): The detection results to be annotated, including class IDs and confidence scores.
+
+        Returns:
+            np.ndarray: The annotated image as a NumPy array.
+        """
         classes = self.metadata.classes
         if classes is not None:
             labels = [
@@ -124,6 +239,26 @@ def infer(
         threshold: float = 0.5,
         annotate: bool = False,
     ) -> Tuple[FocoosDetections, Optional[np.ndarray]]:
+        """
+        Perform inference on the provided image using the remote model.
+
+        This method sends an image to the remote model for inference and retrieves the detection results.
+        Optionally, it can annotate the image with the detection results.
+
+        Args:
+            image (Union[str, Path, bytes]): The image to infer on, which can be a file path, a string representing the path, or raw bytes.
+            threshold (float, optional): The confidence threshold for detections. Defaults to 0.5.
+            annotate (bool, optional): Whether to annotate the image with the detection results. Defaults to False.
+
+        Returns:
+            Tuple[FocoosDetections, Optional[np.ndarray]]:
+                - FocoosDetections: The detection results including class IDs, confidence scores, etc.
+                - Optional[np.ndarray]: The annotated image if `annotate` is True, else None.
+
+        Raises:
+            FileNotFoundError: If the provided image file path is invalid.
+            ValueError: If the inference request fails.
+        """
         image_bytes = None
         if isinstance(image, str) or isinstance(image, Path):
             if not os.path.exists(image):
@@ -160,17 +295,48 @@ def infer(
             logger.error(f"Failed to infer: {res.status_code} {res.text}")
             raise ValueError(f"Failed to infer: {res.status_code} {res.text}")
 
-    def train_metrics(self, period=60) -> Optional[dict]:
+    def train_metrics(self, period=60) -> dict | None:
+        """
+        Retrieve training metrics for the model over a specified period.
+
+        This method fetches the training metrics for the remote model, including aggregated values,
+        such as average performance metrics over the given period.
+
+        Args:
+            period (int, optional): The period (in seconds) for which to fetch the metrics. Defaults to 60.
+
+        Returns:
+            Optional[dict]: A dictionary containing the training metrics if the request is successful,
+                or None if the request fails.
+        """
         res = self.http_client.get(
             f"models/{self.model_ref}/train/all-metrics?period={period}&aggregation_type=Average"
         )
-        if res.status_code == 200:
-            return res.json()
-        else:
+        if res.status_code != 200:
             logger.warning(f"Failed to get train logs: {res.status_code} {res.text}")
             return None
+        return res.json()
 
     def _log_metrics(self):
+        """
+        Log the latest training metrics for the model.
+
+        This method retrieves the current training metrics, such as iteration, total loss, and evaluation
+        metrics (like mIoU for segmentation tasks or AP50 for detection tasks). It logs the most recent values
+        for these metrics, helping monitor the model's training progress.
+
+        The logged metrics depend on the model's task:
+            - For segmentation tasks (SEMSEG), the mean Intersection over Union (mIoU) is logged.
+            - For detection tasks, the Average Precision at 50% IoU (AP50) is logged.
+
+        Logs:
+            - Iteration number.
+            - Total loss value.
+            - Relevant evaluation metric (mIoU or AP50).
+
+        Returns:
+            None: The method only logs the metrics without returning any value.
+        """
         metrics = self.train_metrics()
         if metrics:
             iter = (
@@ -201,7 +367,27 @@ def _log_metrics(self):
                 f"Iter {iter:.0f}: Loss {total_loss:.2f}, {eval_metric} {accuracy}"
             )
 
-    def monitor_train(self, update_period=30):
+    def monitor_train(self, update_period=30) -> None:
+        """
+        Monitor the training process of the model and log its status periodically.
+
+        This method continuously checks the model's training status and logs updates based on the current state.
+        It monitors the primary and secondary statuses of the model, and performs the following actions:
+            - If the status is "Pending", it logs a waiting message and waits for resources.
+            - If the status is "InProgress", it logs the current status and elapsed time, and logs the training metrics if the model is actively training.
+            - If the status is "Completed", it logs the final metrics and exits.
+            - If the training fails, is stopped, or any unexpected status occurs, it logs the status and exits.
+
+        Args:
+            update_period (int, optional): The time (in seconds) to wait between status checks. Default is 30 seconds.
+
+        Logs:
+            - The current training status, including elapsed time.
+            - Training metrics at regular intervals while the model is training.
+
+        Returns:
+            None: This method does not return any value but logs information about the training process.
+        """
         completed_status = ["Completed", "Failed", "Stopped"]
         # init to make do-while
         status = {"main_status": "Flag", "secondary_status": "Flag"}
@@ -238,7 +424,22 @@ def monitor_train(self, update_period=30):
             logger.info(f"Model is not training, status: {status['main_status']}")
             return
 
-    def stop_training(self):
+    def stop_training(self) -> None:
+        """
+        Stop the training process of the model.
+
+        This method sends a request to stop the training of the model identified by `model_ref`.
+        If the request fails, an error is logged and a `ValueError` is raised.
+
+        Raises:
+            ValueError: If the stop training request fails.
+
+        Logs:
+            - Error message if the request to stop training fails, including the status code and response text.
+
+        Returns:
+            None: This method does not return any value.
+        """
         res = self.http_client.delete(f"models/{self.model_ref}/train")
         if res.status_code != 200:
             logger.error(f"Failed to get stop training: {res.status_code} {res.text}")
@@ -246,7 +447,23 @@ def stop_training(self):
                 f"Failed to get stop training: {res.status_code} {res.text}"
             )
 
-    def delete_model(self):
+    def delete_model(self) -> None:
+        """
+        Delete the model from the system.
+
+        This method sends a request to delete the model identified by `model_ref`.
+        If the request fails or the status code is not 204 (No Content), an error is logged
+        and a `ValueError` is raised.
+
+        Raises:
+            ValueError: If the delete model request fails or does not return a 204 status code.
+
+        Logs:
+            - Error message if the request to delete the model fails, including the status code and response text.
+
+        Returns:
+            None: This method does not return any value.
+        """
         res = self.http_client.delete(f"models/{self.model_ref}")
         if res.status_code != 204:
             logger.error(f"Failed to delete model: {res.status_code} {res.text}")
diff --git a/focoos/runtime.py b/focoos/runtime.py
index 7f9ed4f..76855bb 100644
--- a/focoos/runtime.py
+++ b/focoos/runtime.py
@@ -1,3 +1,29 @@
+"""
+Runtime Module for ONNX-based Models
+
+This module provides the necessary functionality for loading, preprocessing,
+running inference, and benchmarking ONNX-based models using different execution
+providers such as CUDA, TensorRT, OpenVINO, and CPU. It includes utility functions
+for image preprocessing, postprocessing, and interfacing with the ONNXRuntime library.
+
+Functions:
+    preprocess_image: Preprocesses an image for model input.
+    postprocess_image: Postprocesses the output image from the model.
+    image_to_byte_array: Converts a PIL image to a byte array.
+    det_postprocess: Postprocesses detection model outputs into Detections.
+    semseg_postprocess: Postprocesses semantic segmentation model outputs into Detections.
+    get_runtime: Returns an ONNXRuntime instance configured for the given runtime type.
+
+Classes:
+    ONNXRuntime: A class that interfaces with ONNX Runtime for model inference.
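+
+Example (an illustrative sketch; the model path and metadata are placeholders):
+    >>> with open("image.jpg", "rb") as f:
+    ...     im, _ = preprocess_image(f.read())
+    >>> runtime = get_runtime(RuntimeTypes.ONNX_CUDA32, "model.onnx", metadata)
+    >>> detections = runtime(im, conf_threshold=0.5)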
+""" + import io from pathlib import Path from time import perf_counter @@ -21,6 +41,17 @@ def preprocess_image(bytes, dtype=np.float32) -> Tuple[np.ndarray, Image.Image]: + """ + Preprocesses the input image (in bytes) for inference by converting it to a numpy array. + + Args: + bytes (bytes): Image data in bytes format (e.g., JPEG, PNG). + dtype (np.dtype, optional): The data type to cast the image array to. Defaults to np.float32. + + Returns: + Tuple[np.ndarray, Image.Image]: A tuple containing the processed image as a numpy array + and the original PIL image. + """ pil_img = Image.open(io.BytesIO(bytes)) img_numpy = np.ascontiguousarray( np.array(pil_img).transpose(2, 0, 1)[np.newaxis, :] # HWC->CHW @@ -31,11 +62,30 @@ def preprocess_image(bytes, dtype=np.float32) -> Tuple[np.ndarray, Image.Image]: def postprocess_image( cmapped_image: np.ndarray, input_image: Image.Image ) -> Image.Image: + """ + Postprocesses the output of an inference to blend the results with the original image. + + Args: + cmapped_image (np.ndarray): The processed image, typically with segmentation or detection results. + input_image (Image.Image): The original input image. + + Returns: + Image.Image: The blended image showing the result of postprocessing. + """ out = Image.fromarray(cmapped_image) return Image.blend(input_image, out, 0.6) def image_to_byte_array(image: Image.Image) -> bytes: + """ + Converts a PIL Image into a byte array. + + Args: + image (Image.Image): The input image to be converted. + + Returns: + bytes: The byte array representing the image. + """ img_byte_arr = io.BytesIO() image.save(img_byte_arr, format="JPEG") img_byte_arr = img_byte_arr.getvalue() @@ -45,6 +95,18 @@ def image_to_byte_array(image: Image.Image) -> bytes: def det_postprocess( out: np.ndarray, im0_shape: Tuple[int, int], conf_threshold: float ) -> Detections: + """ + Postprocesses the output of an object detection model and filters detections + based on a confidence threshold. + + Args: + out (np.ndarray): The output of the detection model. + im0_shape (Tuple[int, int]): The original shape of the input image (height, width). + conf_threshold (float): The confidence threshold for filtering detections. + + Returns: + Detections: A Detections object containing the filtered bounding boxes, class ids, and confidences. + """ cls_ids, boxes, confs = out boxes[:, 0::2] *= im0_shape[1] boxes[:, 1::2] *= im0_shape[0] @@ -60,6 +122,18 @@ def det_postprocess( def semseg_postprocess( out: np.ndarray, im0_shape: Tuple[int, int], conf_threshold: float ) -> Detections: + """ + Postprocesses the output of a semantic segmentation model and filters based + on a confidence threshold. + + Args: + out (np.ndarray): The output of the semantic segmentation model. + im0_shape (Tuple[int, int]): The original shape of the input image (height, width). + conf_threshold (float): The confidence threshold for filtering detections. + + Returns: + Detections: A Detections object containing the masks, class ids, and confidences. + """ cls_ids, mask, confs = out[0][0], out[1][0], out[2][0] masks = np.zeros((len(cls_ids), *mask.shape), dtype=bool) for i, cls_id in enumerate(cls_ids): @@ -78,9 +152,33 @@ def semseg_postprocess( class ONNXRuntime: + """ + A class that interfaces with ONNX Runtime for model inference using different execution providers + (CUDA, TensorRT, OpenVINO, CoreML, etc.). It manages preprocessing, inference, and postprocessing + of data, as well as benchmarking the performance of the model. 
+
+    Attributes:
+        logger (Logger): Logger for the ONNXRuntime instance.
+        name (str): The name of the model (derived from its path).
+        opts (OnnxEngineOpts): Options used for configuring the ONNX Runtime.
+        model_metadata (ModelMetadata): Metadata related to the model.
+        postprocess_fn (Callable): The function used to postprocess the model's output.
+        ort_sess (InferenceSession): The ONNXRuntime inference session.
+        dtype (np.dtype): The data type for the model input.
+        binding (Optional[str]): The binding type for the runtime (e.g., CUDA, CPU).
+    """
+
     def __init__(
         self, model_path: str, opts: OnnxEngineOpts, model_metadata: ModelMetadata
     ):
+        """
+        Initializes the ONNXRuntime instance with the specified model and configuration options.
+
+        Args:
+            model_path (str): Path to the ONNX model file.
+            opts (OnnxEngineOpts): The configuration options for ONNX Runtime.
+            model_metadata (ModelMetadata): Metadata for the model (e.g., task type).
+        """
         self.logger = get_logger()
         self.logger.debug(f"[onnxruntime device] {ort.get_device()}")
         self.logger.debug(
@@ -204,6 +302,16 @@ def __init__(
         self.logger.info(f"⏱️ [onnxruntime] {self.name} WARMUP DONE")
 
     def __call__(self, im: np.ndarray, conf_threshold: float) -> Detections:
+        """
+        Runs inference on the provided input image and returns the model's detections.
+
+        Args:
+            im (np.ndarray): The preprocessed input image.
+            conf_threshold (float): The confidence threshold for filtering results.
+
+        Returns:
+            Detections: A Detections object containing the model's output detections.
+        """
         out_name = None
         input_name = self.ort_sess.get_inputs()[0].name
         out_name = [output.name for output in self.ort_sess.get_outputs()]
@@ -233,6 +341,16 @@ def __call__(self, im: np.ndarray, conf_threshold: float) -> Detections:
         return detections
 
     def benchmark(self, iterations=20, size=640) -> LatencyMetrics:
+        """
+        Benchmarks the model by running multiple inference iterations and measuring the latency.
+
+        Args:
+            iterations (int, optional): Number of iterations to run for benchmarking. Defaults to 20.
+            size (int, optional): The input image size for benchmarking. Defaults to 640.
+
+        Returns:
+            LatencyMetrics: The latency metrics (e.g., FPS, mean, min, max, and standard deviation).
+        """
         self.logger.info(f"⏱️ [onnxruntime] Benchmarking latency..")
         size = size if isinstance(size, (tuple, list)) else (size, size)
 
@@ -293,6 +411,22 @@ def get_runtime(
     model_metadata: ModelMetadata,
     warmup_iter: int = 0,
 ) -> ONNXRuntime:
+    """
+    Creates and returns an ONNXRuntime instance based on the specified runtime type
+    and model path, with options for various execution providers (CUDA, TensorRT, CPU, etc.).
+
+    Args:
+        runtime_type (RuntimeTypes): The type of runtime to use (e.g., ONNX_CUDA32, ONNX_TRT32).
+        model_path (str): The path to the ONNX model.
+        model_metadata (ModelMetadata): Metadata describing the model.
+        warmup_iter (int, optional): Number of warmup iterations before benchmarking. Defaults to 0.
+
+    Returns:
+        ONNXRuntime: A fully configured ONNXRuntime instance.
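+
+    Example (illustrative; assumes the model file and its metadata exist locally):
+        >>> runtime = get_runtime(RuntimeTypes.ONNX_CUDA32, "model.onnx", metadata, warmup_iter=2)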
+ """ if runtime_type == RuntimeTypes.ONNX_CUDA32: opts = OnnxEngineOpts( cuda=True, verbose=False, fp16=False, warmup_iter=warmup_iter diff --git a/mkdocs.yaml b/mkdocs.yaml new file mode 100644 index 0000000..7c83908 --- /dev/null +++ b/mkdocs.yaml @@ -0,0 +1,101 @@ +site_name: Focoos AI SDK + +repo_url: https://github.com/FocoosAI/focoos +edit_uri: edit/main/docs + +theme: + name: material + palette: + - scheme: default + media: "(prefers-color-scheme: light)" + toggle: + icon: material/weather-sunny + name: Switch to dark mode + - scheme: slate + media: "(prefers-color-scheme: dark)" + toggle: + icon: material/weather-night + name: Switch to light mode + font: + text: Merriweather Sans + code: Red Hat Mono + logo: assets/logo.svg + favicon: assets/favicon.svg + icon: + repo: fontawesome/brands/github + features: + - content.code.copy + - navigation.tabs + - navigation.tabs.sticky + +extra_css: + - assets/stylesheets/custom.css + +plugins: + - search + - mkdocstrings: + enable_inventory: true + handlers: + python: + options: + docstring_style: google + - include-markdown + +nav: + - Home: + - Focoos AI: index.md + - Datasets: datasets.md + - Models: models.md + - Getting Started: + - Introduction: getting_started/introduction.md + - Installation: getting_started/installation.md + - Quickstart: getting_started/quickstart.md + - How to: + - Cloud Training: how_to/cloud_training.md + - Inference: how_to/inference.md + - API Reference: + - focoos: api/focoos.md + - remote model: api/remote_model.md + - local model: api/local_model.md + - runtime: api/runtime.md + - Development: + - Contributing: development/contributing.md + - Code of Conduct: development/code_of_conduct.md + - Changelog: development/changelog.md + +markdown_extensions: + - pymdownx.highlight: + linenums: true + - pymdownx.tabbed: + alternate_style: true + - pymdownx.details + - pymdownx.snippets: + restrict_base_path: false + - admonition + - tables + - toc: + permalink: "#" + - attr_list + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + + +extra: + version: + provider: mike + analytics: + provider: google + property: G-RP4PM5PGLN + social: + - icon: fontawesome/brands/github + link: https://github.com/FocoosAI/focoos + - icon: fontawesome/brands/linkedin + link: https://www.linkedin.com/company/focoosai + +copyright: Copyright © 2024 FocoosAI + +watch: + - focoos diff --git a/notebooks/concrete.ipynb b/notebooks/concrete.ipynb index e539973..103e95d 100644 --- a/notebooks/concrete.ipynb +++ b/notebooks/concrete.ipynb @@ -126,45 +126,6 @@ "source": [ "model.monitor_train()" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Step 4. Deploy the model on Focoos servers to be ready for inference!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.deploy(deployment_mode=DeploymentMode.REMOTE, wait=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Select multiple random images\n", - "PATHS = [os.path.join(PATH, p) for p in os.listdir(PATH)]\n", - "\n", - "from utils import start_gradio\n", - "\n", - "start_gradio(\n", - " model=model, paths=PATHS, allowed_paths=[\"/Users/fcdl94/Develop/focoos/data\"]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/pyproject.toml b/pyproject.toml index 638090b..a994bd3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,12 @@ dev = [ "gradio~=5.3.0", "sniffio~=1.2.0", ] +docs = [ + "mkdocs>=1.6.0,<2.0.0", + "mkdocs-material>=9.5.28,<10.0.0", + "mkdocstrings[python]>=0.25.1,<0.26.0", + "mkdocs-include-markdown-plugin>=6.2.1,<7.0.0", +] cpu = ["onnxruntime==1.20.1"] gpu = ["onnxruntime-gpu==1.20.1","nvidia-cuda-runtime-cu12==12.4.127"] tensorrt = ["tensorrt==10.5.0"]