From 2dc20cc13719d0045c751f3bf4c40fe50c372c98 Mon Sep 17 00:00:00 2001 From: Brandon <132288221+brandon-groundlight@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:23:20 -0700 Subject: [PATCH] officially out of beta (#263) --- README.md | 2 - docs/blog/2023-12-06-framegrab.md | 189 ------------------ docs/blog/2023-12-15-best-practices.md | 101 ---------- docs/blog/2024-01-02-groundlight-pi-gen.md | 80 -------- docs/docs/getting-started/getting-started.mdx | 2 - docs/docusaurus.config.js | 12 ++ docs/package-lock.json | 8 +- docs/package.json | 2 +- pyproject.toml | 2 +- 9 files changed, 18 insertions(+), 380 deletions(-) delete mode 100644 docs/blog/2023-12-06-framegrab.md delete mode 100644 docs/blog/2023-12-15-best-practices.md delete mode 100644 docs/blog/2024-01-02-groundlight-pi-gen.md diff --git a/README.md b/README.md index 7b7161d2..5e852f13 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,6 @@ print(f"The answer is {image_query.result}") Your images are first analyzed by machine learning (ML) models which are automatically trained on your data. If those models have high enough confidence, that's your answer. But if the models are unsure, then the images are progressively escalated to more resource-intensive analysis methods up to real-time human review. So what you get is a computer vision system that starts working right away without even needing to first gather and label a dataset. At first it will operate with high latency, because people need to review the image queries. But over time, the ML systems will learn and improve so queries come back faster with higher confidence. -_Note: The SDK is currently in "beta" phase. Interfaces are subject to change in future versions. We will follow [semver](https://semver.org/) semantics for breaking changes._ - ## Learn more Some more resources you might like: diff --git a/docs/blog/2023-12-06-framegrab.md b/docs/blog/2023-12-06-framegrab.md deleted file mode 100644 index f865bf44..00000000 --- a/docs/blog/2023-12-06-framegrab.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: Introducing Groundlight's FrameGrab Library -description: We would like to introduce you to the FrameGrab library. -slug: introducing-framegrab -authors: - - name: Tim Huff - title: Engineering intern at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/06b25bf1a6/hufft.jpg - - name: Blaise Munyampirwa - title: Engineer at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/d12109465d/munyampirwab.jpg - - name: Leo Dirac - title: CTO and Co-founder at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/284x281/602a9c95c5/diracl.png - - name: Tyler Romero - title: Senior ML Engineer at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/368053d79a/romerot.jpg - - name: Michael Vogelsong - title: Chief ML Engineer at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/c87b9d30f4/vogelsongm.jpg - - -tags: [groundlight-extensions, framegrab] -image: /img/gl-icon400.png -hide_table_of_contents: false ---- - - - -At Groundlight, we continue to build infrastructure that allows our customers to easily use computer -vision without a pre-existing dataset for industrial inspection, retail analytics, mobile robotics, and -much more. We've built many features towards the goal of declarative computer vision, and today we are excited to -introduce FrameGrab, a Python library designed to make it easy to grab frames from -cameras or streams. 
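If you want to try it while you read, FrameGrab is a standard Python package (installing it with pip, e.g. `pip install framegrab`, should be all you need). A minimal usage sketch looks roughly like the following. The only required configuration key is `input_type`, and the `generic_usb` value below assumes an ordinary USB webcam is attached.

```python notest
from framegrab import FrameGrabber

# Minimal config: only input_type is required; everything else is optional.
config = """
input_type: generic_usb
"""

grabber = FrameGrabber.create_grabber_yaml(config)
frame = grabber.grab()  # a single frame as an image array, or None if the grab failed
if frame is not None:
    print(frame.shape)  # e.g. (1080, 1920, 3) for a 1080p camera

grabber.release()
```

The sections below walk through the full configuration options and the other supported camera types.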
- -FrameGrab supports generic USB cameras, RTSP streams, Basler USB cameras, Basler GigE cameras, and Intel RealSense depth cameras. - - - - -## Grabbing Camera Frames - -Frame grabber objects are configured through YAML. The configuration combines the camera type, camera ID, and the camera -options. The YAML config contains many configurable features, but only `input_type` is required. Valid choices for -`input_type` include - -* generic_usb -* rtsp -* realsense -* basler - -Here is an example of how to use the generic USB configuration - -```python notest -from framegrab import FrameGrabber - -config = """ -name: Front Door Camera -input_type: generic_usb -id: - serial_number: 23432570 -options: - resolution: - height: 1080 - width: 1920 - zoom: - digital: 1.5 -""" - -grabber = FrameGrabber.create_grabber_yaml(config) -frame = grabber.grab() - -# Do real work with the frame - -# Finally release the grabber object -grabber.release() - -``` - -For the full set of configurable parameters, please refer to the [FrameGrab repository](https://github.com/groundlight/framegrab/tree/main). - -## Multi-cam Configuration - -If you have multiple cameras of the same type plugged in, we recommend you include serial numbers in the YAML config to -ensure proper pairing. The default pairing behavior is sequential (i.e., configurations will be paired with cameras in -a sequential ordering). - -You can add serial numbers for multiple cameras like this - -```yaml -GL_CAMERAS: | - - name: on robot arm - input_type: realsense - options: - depth: - side_by_side: 1 - crop: - relative: - right: .8 - - name: conference room - input_type: rtsp - id: - rtsp_url: rtsp://admin:password@192.168.1.20/cam/realmonitor?channel=1&subtype=0 - options: - crop: - pixels: - top: 350 - bottom: 1100 - left: 1100 - right: 2000 - - name: workshop - input_type: generic_usb - id: - serial_number: B77D3A8F - -``` - -## FrameGrab Autodiscovery Mode - -Among other features, FrameGrab also includes autodiscovery mode. This allows you to automatically connect to all cameras -that are plugged into your machine (or discoverable on the network). Autodiscovery will load up default configurations -for each camera. - -:::note - -Please note that RTSP streams cannot be autodiscovered in this manner. RTSP URLs must be pre-specified in the -configurations. - -::: - -We recommend autodiscovery for simple applications where you don't need to set any special options on your cameras. -It is also a convenient method for finding the serial numbers of your cameras in case they are not printed on them. - -Below is a short example of how to launch autodiscovery mode. - -```python notest -from framegrab import FrameGrabber - -grabbers = FrameGrabber.autodiscover() - -# Print some information about the discovered cameras -for grabber in grabbers.values(): - print(grabber.config) - - # Do real work - - # Release the frame grabber object - grabber.release() - -``` - - -## Using FrameGrab for Motion Detection - -With this release, we also continue to support [motion detection](https://en.wikipedia.org/wiki/Motion_detection) via frame differencing, a -fast algorithm for easily detecting motion in a sequence of frames. - -To use motion detection, initialize the MotionDetector instance with the desired percentage of pixels -needed to change in an image for it to be flagged for motion and the minimum brightness change for each pixel for it -to be considered changed. Here is a comprehensive example. 
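One small note on the example that follows: `motion_threshold` is a placeholder for whatever sensitivity you want, so define it before constructing the detector, for instance:

```python notest
motion_threshold = 1.0  # flag motion when roughly 1% of pixels change between frames
```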
- -```python notest -from framegrab import FrameGrabber, MotionDetector - -config = { - 'input_type': 'webcam', -} -grabber = FrameGrabber.create_grabber(config) -motion_detector = MotionDetector(pct_threshold=motion_threshold, val_threshold=60) - -while True: - frame = grabber.grab() - if frame is None: - print("No frame captured!") - continue - - if motion_detector.motion_detected(frame): - print("Motion detected!") - -``` - - -## Conclusion - -Recent releases of FrameGrab add various easy to use features. We now support -multiple camera types and continue to support motion detection. - -If you encounter any issues while using FrameGrab, please feel free to file an issue in our [GitHub repository](https://github.com/groundlight/framegrab) -and while there, review guidelines for [contributing](https://github.com/groundlight/framegrab#contributing) to this library. diff --git a/docs/blog/2023-12-15-best-practices.md b/docs/blog/2023-12-15-best-practices.md deleted file mode 100644 index 96f0ac99..00000000 --- a/docs/blog/2023-12-15-best-practices.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Best practices for best results with Groundlight -description: How to get the best chance of success from Groundlight detectors -slug: best-practices -authors: - - name: Paulina Varshavskaya - title: Head of R&D at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/932933bc26/varshap.jpg - - name: Sunil Kumar - title: ML Engineer at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/a265e322bd/kumars.jpg - - name: Blake Thorne - title: Head of Marketing at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/daf4a78ec3/thorneb.jpg - -tags: [how-to, best-practices] -image: /img/gl-icon400.png -hide_table_of_contents: false ---- - -Want to get the best chance of success from your new Groundlight detectors? Here are five suggestions from the Groundlight science team that can help you get the best performance possible. - -Come at it from the point of view of making answering your image query question as easy as possible. -Pretend you’re explaining the task to a novice. What would you need to do to set them up for success? - - - -## Phrase the question right -You will have more success asking questions that can in principle be answered by a reasonable person -simply by looking at a single image from the detector. - -:white_check_mark: **DO:** "Is there a part staged in front of the robot ready for picking up?" -:x: **DON'T:** "Am I awesome?" - -Think about how you will use the output of your detector, so the Yes and No answers align with your expectations. A technically correct answer to a vague question may be of no use to you. For example, if you have a camera pointing down on a kitchen range and would like to get an alert if there's a fire, phrase the query so that normal gas burner flames are excluded. - -:white_check_mark: **DO:** "Is there a fire in the pan? (Ignore normal gas burner flames)" -:x: **DON'T:** "Is there a fire?" - -## Put details in the notes -Is any specialized knowledge required to answer your query? -Use the notes dialog to provide explanations of any assumed knowledge or definitions of technical terms. -Like in the fire extinguisher example above, consider adding short definitions inside the text of the query. - -:white_check_mark: **DO:** “Is the fiducial (etched arrow on the gear surface) aligned with the painted chain link?” -:white_check_mark: **DO:** “Is the fence fully closed? 
(Metal bar on the fence must be touching the plywood wall)” - -Here’s an example of detailed notes for a detector asking “Is there a streetcar visible? READ NOTES”: - -[![Screenshot of detailed notes for a detector](./images/2023-12-15-best-practices/streetcar_visible_notes.png "Detailed notes for a detector asking \"Is there a streetcar visible? READ NOTES\"")](./images/2023-12-15-best-practices/streetcar_visible_notes.png) - -In this case, the customer even drew on the example images to point out where the street car might appear. -Detailed notes may be especially useful if the question is about a smaller region of the scene in the image. - -## Think of edge cases -How do you want to treat unclear or edge cases? -Sometimes it’s impossible to answer the question based on the image, for example, when it’s too dark -at night to tell, or the view is temporarily obstructed by something moving in front of the camera. -Do you know how you’d like to treat those cases? - -:white_check_mark: **DO:** Add notes like “If the image is too dark to tell, the answer should be YES.” - -In the fire extinguisher example below, the customer wrote “If you can’t see the fire extinguisher, -it is blocked” inside the query text, drawing attention to the most important potential edge case. - -[![Screenshot for a detector with edge case in query](./images/2023-12-15-best-practices/fire_extinguisher_blocked_yes.png "A detector with a detailed query including a likely potential edge case (fire extinguisher not visible).")](./images/2023-12-15-best-practices/fire_extinguisher_blocked_yes.png) - -Detailed notes on foreseeable edge cases will prevent confusion by backend labelers and result in -quicker learning for your detector at less cost to you. - -## Seed with a few examples -It helps to add a few labels yourself early on, in order to provide good examples for backend labelers and the -new ML model. For best results, if you have example images for both YES and NO answers, send -them through early on, and add the corresponding labels. Having at least 2 customer “ground truth” -answers for each class of Yes or No will also give you ML performance metrics on your detector. - -![Blue button before 2 examples of each class are provided](./images/2023-12-15-best-practices/label_button_before.png "") - -## Only you know the truth -Check periodically under the Flagged tab on your detector's detail page to see if any images may still be confusing. Click on the "Override Label" button to provide the correct answer in those cases. - -[![Screenshot of image flagged as needing better examples](./images/2023-12-15-best-practices/flagged_images.png "Partial screenshot of a Flagged view")](./images/2023-12-15-best-practices/flagged_images.png) - -It's also good practice to continue adding a few ground truth labels here and there by clicking on the “Keep labeling” button -on the detector details page, in order to get tighter confidence bounds on your detector’s performance metrics. - ---- -> :mortar_board: *Read an in-depth discussion of how we assess detector accuracy and the confidence bounds reported for your detector's performance:* -[Tales from the Binomial Tail: Confidence intervals for balanced accuracy](2024-01-16-binomial-tails.md) ---- - -If you notice labeling mistakes, correct them by providing your own answer. Then consider adding -extra instructions in the notes. You can upload screenshots or images inside the notes dialog too. 
-Our labeling staff will be notified whenever you make changes to your notes so they stay up to date with how you want your detector to behave and can quickly address misconceptions. - -## [Ready to start?](https://app.groundlight.ai) - -We hope these tips give you a great start. If you haven’t already, you can sign up for a free account at https://app.groundlight.ai. Dive into [Groundlight Pi-Gen](https://github.com/groundlight/groundlight-pi-gen) for a hassle-free introduction to AI-powered computer vision on Raspberry Pi. - -If you have any questions, please reach out to us on the in-application chat or via email to support@groundlight.ai. - diff --git a/docs/blog/2024-01-02-groundlight-pi-gen.md b/docs/blog/2024-01-02-groundlight-pi-gen.md deleted file mode 100644 index 80f8e6b6..00000000 --- a/docs/blog/2024-01-02-groundlight-pi-gen.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Linux OS Images for Computer Vision on Raspberry Pi -description: Groundlight simplifies the setup process by providing ready-to-use OS images for Raspberry Pi -slug: raspberry-pi-computer-vision -authors: - - name: Blaise Munyampirwa - title: Engineer at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/1000x1000/d12109465d/munyampirwab.jpg - - name: Leo Dirac - title: CTO and Co-founder at Groundlight - image_url: https://a-us.storyblok.com/f/1015187/284x281/602a9c95c5/diracl.png - -tags: [raspberry-pi, mns] -image: /img/gl-icon400.png -hide_table_of_contents: false ---- - -Happy New Year everybody! If you got a fancy new Raspberry Pi 5 for Christmas, you might be wondering what to do with it. Well, we have a suggestion: build a computer vision application with it! And we have all the tools you need to get started. - -Raspberry Pi offers a great platform for computer vision (CV), ranging from home hobby projects to serious industrial applications. However, setting up a Raspberry Pi for computer vision can be a time-consuming process. [Groundlight Pi-Gen](https://github.com/groundlight/groundlight-pi-gen), simplifies the setup process by providing ready-to-use OS images for Raspberry Pi. - - - -(Note that here, when we say "image" we mean an OS image, which is a file containing a snapshot of an operating system - linux - that can be installed onto a new machine. These are not photos or pictures, which are also of course important in computer vision. Oh jargon...) - -## Raspberry Pi OS Images pre-built with Computer Vision Software -To download a Linux image for your Raspberry Pi, loaded with all the software you need for computer vision, -go to the [releases](https://github.com/groundlight/groundlight-pi-gen/releases) section in Groundlight Pi-Gen to find Raspberry Pi OS images (`.img.xz` files) that have pre-configured software environments for computer vision. These images are ready to be flashed onto a Raspberry Pi. - -These include a fast, modern version of python (3.11), along with key libraries like [OpenCV](https://opencv.org/) for classic algorithms and device management, [Numpy](https://numpy.org/) for fast math, [FrameGrab](https://code.groundlight.ai/python-sdk/blog/introducing-framegrab) for declarative access to image sources, and of course [Groundlight](https://pypi.org/project/groundlight/) for fully-managed visual understanding models. We've set up a `venv` for you to avoid the dreaded "externally-managed-environment" error which plagues many newer python versions, while still letting you use good-old `pip` to add more. 
(We like `poetry` and `conda`, and these will also work fine if you prefer them.)
-
-There are several flavors of OS image available. The smaller ones are suitable for headless use, while the larger ones include a desktop GUI with a browser. The key differences are the size of the download and the amount of time it takes to flash the image onto a microSD card. The [available flavors in the current release](https://github.com/groundlight/groundlight-pi-gen/releases) are:
-
-![Comparison of Groundlight Pi-Gen OS image flavors](./images/2024-01-02-groundlight-pi-gen/download-assets.png "Comparison of Groundlight Pi-Gen OS image flavors")
-
-- `desktop`: Image with Groundlight MNS and a desktop GUI with a browser. Appropriate for a Raspberry Pi with a screen attached.
-- `mns-headless`: Image with Groundlight Monitoring Notification Server (MNS) for headless use.
-- `sdk-only`: Minimal image with the Python SDK and core libraries. Suitable for headless use on smaller Raspberry Pi models such as the Pi Zero.
-
-A couple more flavors you might be interested in: We're planning a [kiosk mode](https://github.com/groundlight/groundlight-pi-gen/issues/15) for the desktop image, so that you can run a Groundlight MNS instance on a Raspberry Pi with a screen attached, and have it automatically start up in a browser.
-Also note that the `edge` version, which will download and run the ML models locally, is not yet supported on Raspberry Pi, because the edge models require a CUDA GPU.
-
-## Flashing the OS Image onto a microSD Card
-
-Once you have [downloaded your image file](https://github.com/groundlight/groundlight-pi-gen/releases), the next step is to flash it onto a microSD card. To do this,
-download the [Raspberry Pi Imager](https://www.raspberrypi.com/software/) software.
-
-![Raspberry Pi Imager home screen](./images/2024-01-02-groundlight-pi-gen/rpi-imager-1.png "Raspberry Pi Imager home screen")
-
-After selecting your hardware type under "Choose Device", click "Choose OS" and scroll to the bottom to "Use custom".
-
-![Raspberry Pi Imager use custom OS](./images/2024-01-02-groundlight-pi-gen/rpi-imager-2.png "Raspberry Pi Imager use custom OS")
-
-Then select the `.img.xz` file you downloaded.
-
-![Raspberry Pi Imager pick OS file](./images/2024-01-02-groundlight-pi-gen/rpi-imager-3.png "Raspberry Pi Imager pick OS file")
-
-Then choose your microSD card with the "Choose Storage" button, and then click "Next".
-You'll get a prompt asking "Use OS customization?" which is optional, but very cool. Choose "Edit settings", and you
-can set your Wi-Fi credentials and enable SSH login with a public key.
-
-![Raspberry Pi Imager OS customization](./images/2024-01-02-groundlight-pi-gen/rpi-imager-4.png "Raspberry Pi Imager OS customization")
-
-When you're done configuring settings, click "Save" and then "Yes" to confirm. Writing the image to the microSD card will take a few minutes. When it's done, just pop the SD card into your Pi, and power it up! If it all works properly, you'll be able to access your Raspberry Pi over the network without needing to plug in a keyboard, mouse, or monitor. (We like to plug it into Ethernet for the first boot, because we find that the Raspberry Pi's Wi-Fi can be a bit finicky, even if properly configured.)
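Once the Pi is up and reachable, a quick way to confirm that the pre-installed computer vision stack is healthy is to run a short Python check on the Pi itself, from inside the pre-configured `venv`. This is only a sketch; the package list simply mirrors what the image is described as shipping with.

```python
# Sanity check for a freshly flashed Groundlight Pi-Gen image.
import sys

import cv2          # OpenCV, for classic CV algorithms and device management
import numpy as np  # fast math
import framegrab    # declarative access to image sources
import groundlight  # the Groundlight SDK

print(f"Python {sys.version.split()[0]}")  # expect a 3.11.x release
print(f"OpenCV {cv2.__version__}, NumPy {np.__version__}")
print("framegrab and groundlight imported successfully")
```

If any of these imports fail, double-check that you activated the environment the image set up rather than the system Python.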
- - -### No-code machine vision with Monitoring Notification Server (MNS) -If you opted to install the `desktop` or `mns-headless` image, you'll have a web application called the [Groundlight Monitoring Notification Server (MNS)](https://github.com/groundlight/monitoring-notification-server), -which is a web application that allows you set up a computer vision pipeline without writing any code, and have it notify you when it detects something of interest. - -After setting up your Raspberry Pi with Groundlight OS, wait a few minutes for it to finish downloading everything, and then access the MNS by navigating to `http://[your-raspberry-pi's-IP-address]:3000` in a web browser, or if you're running the desktop version, open [`http://localhost:3000/`](http://localhost:3000). - -![MNS sample home screen](./images/2024-01-02-groundlight-pi-gen/mns-home.png "MNS sample home screen") - -It will prompt you for your [Groundlight API token](docs/getting-started/api-tokens), which you can get with a free account at [app.groundlight.ai](https://app.groundlight.ai). Then you can describe your visual query in natural language, and how you want the MNS to notify you when it detects something of interest. For best-practices on how to describe your visual query, see [this blog post](https://code.groundlight.ai/python-sdk/blog/best-practices). - -## Get Started for Free -To start building your own computer vision solutions, sign up for a free account at [app.groundlight.ai](https://app.groundlight.ai). Dive into Groundlight Pi-Gen for a hassle-free introduction to AI-powered computer vision on Raspberry Pi. - -If you have any questions, please reach out to us on the in-application chat at [app.groundlight.ai](https://app.groundlight.ai) or on [GitHub](https://github.com/groundlight/python-sdk/issues). diff --git a/docs/docs/getting-started/getting-started.mdx b/docs/docs/getting-started/getting-started.mdx index de6ce1a2..b1abf684 100644 --- a/docs/docs/getting-started/getting-started.mdx +++ b/docs/docs/getting-started/getting-started.mdx @@ -14,8 +14,6 @@ image_query = gl.submit_image_query(detector=det, image=img) print(f"The answer is {image_query.result}") ``` -_Note: The SDK is currently in "beta" phase. Interfaces are subject to change in future versions. We will follow [semver](https://semver.org/) semantics for breaking changes._ - ### How does it work? Your images are first analyzed by machine learning (ML) models which are automatically trained on your data. If those models have high enough [confidence](docs/building-applications/4-managing-confidence.md), that's your answer. But if the models are unsure, then the images are progressively escalated to more resource-intensive analysis methods up to real-time human review. So what you get is a computer vision system that starts working right away without even needing to first gather and label a dataset. At first it will operate with high latency, because people need to review the image queries. But over time, the ML systems will learn and improve so queries come back faster with higher confidence. 
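The escalation behavior described above is driven by the confidence threshold you set on a detector. Below is a rough sketch of what that looks like in code. The detector name, query, and image path are placeholders, and the `confidence_threshold` argument and the `result.label` / `result.confidence` fields should be checked against the current SDK reference.

```python
from groundlight import Groundlight

gl = Groundlight()  # reads your API token from the GROUNDLIGHT_API_TOKEN environment variable

# Placeholder detector: answers come back faster as the ML models learn from your data.
detector = gl.get_or_create_detector(
    name="dock-door",
    query="Is the loading dock door fully closed?",
    confidence_threshold=0.9,  # below this, the query escalates toward human review
)

image_query = gl.submit_image_query(detector=detector, image="dock-door.jpg")
result = image_query.result
print(f"Answer: {result.label} (confidence: {result.confidence})")
```

Raising the threshold trades speed for certainty: more queries wait for review at first, but the answers that come back are more reliable.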
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index d6ec35dc..cd79ecc7 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -230,6 +230,18 @@ const config = { to: "https://www.groundlight.ai/blog/building-your-first-computer-vision-model-just-got-easier", // new marketing site route from: "/blog/getting-started", // old blog route }, + { + to: "https://www.groundlight.ai/blog/introducing-groundlights-framegrab-library", + from: "/blog/introducing-framegrab" + }, + { + to: "https://www.groundlight.ai/blog/best-practices-for-best-results-with-groundlight", + from: "/blog/best-practices" + }, + { + to: "https://www.groundlight.ai/blog/linux-os-images-for-computer-vision-on-raspberry-pi", + from: "/blog/raspberry-pi-computer-vision" + }, ], }, ], diff --git a/docs/package-lock.json b/docs/package-lock.json index f486d971..cbf48655 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -17,7 +17,7 @@ "prism-react-renderer": "^2.3.0", "react": "^18.0.0", "react-dom": "^18.0.0", - "rehype-katex": "^7.0.0", + "rehype-katex": "^7.0.1", "remark-math": "^6.0.0" }, "devDependencies": { @@ -12849,9 +12849,9 @@ } }, "node_modules/rehype-katex": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.0.tgz", - "integrity": "sha512-h8FPkGE00r2XKU+/acgqwWUlyzve1IiOKwsEkg4pDL3k48PiE0Pt+/uLtVHDVkN1yA4iurZN6UES8ivHVEQV6Q==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", + "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==", "dependencies": { "@types/hast": "^3.0.0", "@types/katex": "^0.16.0", diff --git a/docs/package.json b/docs/package.json index 7ba32434..91cf1615 100644 --- a/docs/package.json +++ b/docs/package.json @@ -24,7 +24,7 @@ "prism-react-renderer": "^2.3.0", "react": "^18.0.0", "react-dom": "^18.0.0", - "rehype-katex": "^7.0.0", + "rehype-katex": "^7.0.1", "remark-math": "^6.0.0" }, "devDependencies": { diff --git a/pyproject.toml b/pyproject.toml index 9b398174..2b5228ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ packages = [ {include = "**/*.py", from = "src"}, ] readme = "README.md" -version = "0.18.1" +version = "0.18.2" [tool.poetry.dependencies] # For certifi, use ">=" instead of "^" since it upgrades its "major version" every year, not really following semver