From b71c0546ff5351a9e1d2866c99ed027dbbafcd1f Mon Sep 17 00:00:00 2001
From: Brad Dwyer
Date: Mon, 6 Jan 2025 10:14:51 -0600
Subject: [PATCH 1/4] Hide edit button on homepage

---
 docs/index.md                 |  5 +++
 docs/scripts/gen_ref_pages.py | 62 ++++++++++++++++++-----------------
 2 files changed, 37 insertions(+), 30 deletions(-)

diff --git a/docs/index.md b/docs/index.md
index 8f1fcc2481..1a3638767a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -19,6 +19,11 @@ hide:
   max-width: 50rem;
   margin: auto;
 }
+
+/* hide edit button */
+article > a.md-content__button.md-icon:first-child {
+  display: none;
+}
 
 ![Roboflow Inference banner](https://github.com/roboflow/inference/blob/main/banner.png?raw=true)
diff --git a/docs/scripts/gen_ref_pages.py b/docs/scripts/gen_ref_pages.py
index 63063780d2..2250b9675e 100644
--- a/docs/scripts/gen_ref_pages.py
+++ b/docs/scripts/gen_ref_pages.py
@@ -3,39 +3,41 @@
 from pathlib import Path
 
 import mkdocs_gen_files
+import os
 
 SKIP_MODULES = [
     "inference.enterprise.device_manager.command_handler",
     "inference.enterprise.parallel.celeryconfig",
 ]
 
-for package in ["inference", "inference_sdk", "inference_cli"]:
-    nav = mkdocs_gen_files.Nav()
-    src = Path(__file__).parent.parent.parent / package
-
-    for path in sorted(p for p in src.rglob("*.py") if "landing" not in p.parts):
-        module_path = path.relative_to(src.parent).with_suffix("")
-        doc_path = path.relative_to(src.parent).with_suffix(".md")
-        full_doc_path = Path("reference", doc_path)
-
-        parts = list(module_path.parts)
-        identifier = ".".join(parts)
-        if parts[-1] == "__main__" or parts[-1] == "__init__" or identifier in SKIP_MODULES:
-            # print("SKIPPING", identifier)
-            continue
-
-        nav[parts] = f"/reference/{module_path.as_posix()}.md"
-
-        with mkdocs_gen_files.open(full_doc_path, "w") as fd:
-            fd.write(f"::: {identifier}")
-
-        edit_path = f"https://github.com/roboflow/inference/tree/main/{module_path.as_posix()}.py"
-        # print("Edit path:", edit_path)
-        mkdocs_gen_files.set_edit_path(full_doc_path, edit_path)
-
-    with mkdocs_gen_files.open(f"reference/{package}/index.md", "w") as nav_file:
-        generator = nav.build_literate_nav()
-        lines = list(generator)
-        # print("GENERATING NAVIGATION")
-        # print("\n".join(lines))
-        nav_file.writelines(lines)
+if not os.environ.get("SKIP_CODEGEN"):
+    for package in ["inference", "inference_sdk", "inference_cli"]:
+        nav = mkdocs_gen_files.Nav()
+        src = Path(__file__).parent.parent.parent / package
+
+        for path in sorted(p for p in src.rglob("*.py") if "landing" not in p.parts):
+            module_path = path.relative_to(src.parent).with_suffix("")
+            doc_path = path.relative_to(src.parent).with_suffix(".md")
+            full_doc_path = Path("reference", doc_path)
+
+            parts = list(module_path.parts)
+            identifier = ".".join(parts)
+            if parts[-1] == "__main__" or parts[-1] == "__init__" or identifier in SKIP_MODULES:
+                # print("SKIPPING", identifier)
+                continue
+
+            nav[parts] = f"/reference/{module_path.as_posix()}.md"
+
+            with mkdocs_gen_files.open(full_doc_path, "w") as fd:
+                fd.write(f"::: {identifier}")
+
+            edit_path = f"https://github.com/roboflow/inference/tree/main/{module_path.as_posix()}.py"
+            # print("Edit path:", edit_path)
+            mkdocs_gen_files.set_edit_path(full_doc_path, edit_path)
+
+        with mkdocs_gen_files.open(f"reference/{package}/index.md", "w") as nav_file:
+            generator = nav.build_literate_nav()
+            lines = list(generator)
+            # print("GENERATING NAVIGATION")
+            # print("\n".join(lines))
+            nav_file.writelines(lines)
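A quick way to exercise the new `SKIP_CODEGEN` guard during local docs work; the `mkdocs serve` invocation below is an assumed workflow, not something this patch sets up:

```python
import os
import subprocess

# Build the docs without regenerating the API reference pages.
# Any non-empty value for SKIP_CODEGEN short-circuits the generation
# loop guarded above in docs/scripts/gen_ref_pages.py.
env = dict(os.environ, SKIP_CODEGEN="1")
subprocess.run(["mkdocs", "serve"], env=env, check=True)
```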
From b8ad9bf08402593b6563cbb2b7d16707d3fb2f76 Mon Sep 17 00:00:00 2001
From: Brad Dwyer
Date: Mon, 6 Jan 2025 11:46:29 -0600
Subject: [PATCH 2/4] Update Landing Page Copy

---
 docs/index.md   | 307 ++++++++----------------------------------------
 docs/styles.css |   4 +
 2 files changed, 51 insertions(+), 260 deletions(-)

diff --git a/docs/index.md b/docs/index.md
index 1a3638767a..a59ed4b287 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -5,7 +5,53 @@ hide:
   - toc
 ---
 
+
+## Make Any Camera an AI Camera
+
+Inference turns any computer or edge device into a command center for your computer vision projects.
+
+* 🛠️ Self-host [your own fine-tuned models](/quickstart/explore_models.md)
+* 🧠 Access the latest and greatest foundation models (like [Florence-2](https://blog.roboflow.com/florence-2/), [CLIP](https://blog.roboflow.com/openai-clip/), and [SAM2](https://blog.roboflow.com/what-is-segment-anything-2/))
+* 🤝 Use Workflows to track, count, time, measure, and visualize
+* 👁️ Combine ML with traditional CV methods (like OCR, Barcode Reading, QR, and template matching)
+* 📈 Monitor, record, and analyze predictions
+* 🎥 Manage cameras and video streams
+* 📬 Send notifications when events happen
+* 🛜 Connect with external systems and APIs
+* 🔗 [Extend](/workflows/create_workflow_block.md) with your own code and models
+* 🚀 Deploy production systems at scale
+
+See [Example Workflows](https://roboflow.com/workflows/templates) for common use-cases like detecting small objects with SAHI, multi-model consensus, active learning, reading license plates, blurring faces, background removal, and more.
+
+Get started with our "Run your first model" guide
 
-![Roboflow Inference banner](https://github.com/roboflow/inference/blob/main/banner.png?raw=true)
-
-Roboflow Inference is an open-source platform designed to simplify the deployment of computer vision models. It enables developers to perform object detection, classification, instance segmentation and [keypoint detection](/quickstart/run_keypoint_detection.md), and utilize foundation models like [CLIP](/foundation/clip.md), [Segment Anything](/foundation/sam.md), and [YOLO-World](/foundation/yolo_world.md) through a Python-native package, a self-hosted inference server, or a fully [managed API](https://docs.roboflow.com/).
-
-Explore our [enterprise options](https://roboflow.com/sales) for advanced features like server deployment, active learning, and commercial licenses for YOLOv5 and YOLOv8.
-
-Get started with our "Run your first model" guide
-
-Here is an example of a model running on a video using Inference:
-
-## 💻 install
-
-Inference package requires [**Python>=3.8,<=3.11**](https://www.python.org/). Click [here](/quickstart/docker.md) to learn more about running Inference inside Docker.
-
-```bash
-pip install inference
-```
-
-<details>
-<summary>👉 running on a GPU</summary>
-
-To enhance model performance in GPU-accelerated environments, install CUDA-compatible dependencies instead:
-
-```bash
-pip install inference-gpu
-```
-</details>
-
-<details>
-<summary>👉 advanced models</summary>
-
-Inference supports multiple model types for specialized tasks. From Grounding DINO for identifying objects with a text prompt, to DocTR for OCR, to CogVLM for asking questions about images - you can find out more in the Foundation Models page.
-
-The `inference` and `inference-gpu` packages install only the minimal shared dependencies. Install model-specific dependencies to ensure code compatibility and license compliance. Learn more about the models supported by Inference.
-
-```bash
-pip install inference[yolo-world]
-```
-</details>
-
-## 🔥 quickstart
-
-Use the Inference SDK to run models locally with just a few lines of code. The image input can be a URL, a numpy array, or a PIL image.
-
-```python
-from inference import get_model
-
-model = get_model(model_id="yolov8n-640")
-
-results = model.infer("https://media.roboflow.com/inference/people-walking.jpg")
-```
-
-<details>
-<summary>👉 roboflow models</summary>
-
-Set up your `ROBOFLOW_API_KEY` to access thousands of fine-tuned models shared by the Roboflow Universe community and your custom models. Navigate to the 🔑 keys section to learn more.
-
-```python
-from inference import get_model
-
-model = get_model(model_id="soccer-players-5fuqs/1")
-
-results = model.infer(
-    image="https://media.roboflow.com/inference/soccer.jpg",
-    confidence=0.5,
-    iou_threshold=0.5
-)
-```
-</details>
-
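For reference, a short sketch of consuming the `results` list returned above. The field names assume the standard object-detection response schema and should be verified against the installed version:

```python
# results holds one response per input image; each response carries a
# list of predictions (assumed object-detection schema).
for prediction in results[0].predictions:
    print(
        prediction.class_name,            # predicted label
        round(prediction.confidence, 2),  # detection confidence
        (prediction.x, prediction.y, prediction.width, prediction.height),  # box center and size
    )
```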
-<details>
-<summary>👉 foundational models</summary>
-
-- CLIP Embeddings - generate text and image embeddings that you can use for zero-shot classification or assessing image similarity.
-
-    ```python
-    from inference.models import Clip
-
-    model = Clip()
-
-    embeddings_text = model.embed_text("a football match")
-    embeddings_image = model.embed_image("https://media.roboflow.com/inference/soccer.jpg")
-    ```
-
-- Segment Anything - segment all objects visible in the image or only those associated with selected points or boxes.
-
-    ```python
-    from inference.models import SegmentAnything
-
-    model = SegmentAnything()
-
-    result = model.segment_image("https://media.roboflow.com/inference/soccer.jpg")
-    ```
-
-- YOLO-World - an almost real-time zero-shot detector that enables the detection of any objects without any training.
-
-    ```python
-    from inference.models import YOLOWorld
-
-    model = YOLOWorld(model_id="yolo_world/l")
-
-    result = model.infer(
-        image="https://media.roboflow.com/inference/dog.jpeg",
-        text=["person", "backpack", "dog", "eye", "nose", "ear", "tongue"],
-        confidence=0.03
-    )
-    ```
-</details>
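The embeddings above become useful once compared: a minimal sketch that scores text-image similarity with cosine similarity, assuming both `embed_text` and `embed_image` return numpy arrays:

```python
import numpy as np

def cosine_similarity(a, b):
    # Flatten (1, dim) embeddings to plain vectors before comparing.
    a, b = np.ravel(a), np.ravel(b)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Higher scores mean the image is closer to the prompt; scoring several
# prompts against one image gives simple zero-shot classification.
score = cosine_similarity(embeddings_text, embeddings_image)
print(f"text-image similarity: {score:.3f}")
```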
-
-## 📟 inference server
-
-You can also run Inference as a microservice with Docker.
-
-### deploy server
-
-The inference server is distributed via Docker. Behind the scenes, inference will download and run the image that is appropriate for your hardware. [Here](/quickstart/docker.md#advanced-build-a-docker-container-from-scratch), you can learn more about the supported images.
-
-```bash
-inference server start
-```
-
-### run client
-
-Consume inference server predictions using the HTTP client available in the Inference SDK.
-
-```python
-from inference_sdk import InferenceHTTPClient
-
-client = InferenceHTTPClient(
-    api_url="http://localhost:9001",
-    api_key=<ROBOFLOW_API_KEY>
-)
-with client.use_model(model_id="soccer-players-5fuqs/1"):
-    predictions = client.infer("https://media.roboflow.com/inference/soccer.jpg")
-```
-
-If you're using the hosted API, change the local API URL to `https://detect.roboflow.com`. Accessing the hosted inference server and/or using any of the fine-tuned models requires a `ROBOFLOW_API_KEY`. For further information, visit the 🔑 keys section.
-
-## 🎥 inference pipeline
-
-The inference pipeline is an efficient method for processing static video files and streams. Select a model, define the video source, and set a callback action. You can choose from predefined callbacks that allow you to [display results](/reference/inference/core/interfaces/stream/sinks.md#inference.core.interfaces.stream.sinks.render_boxes) on the screen or [save them to a file](/reference/inference/core/interfaces/stream/sinks.md#inference.core.interfaces.stream.sinks.VideoFileSink).
-
-```python
-from inference import InferencePipeline
-from inference.core.interfaces.stream.sinks import render_boxes
-
-pipeline = InferencePipeline.init(
-    model_id="yolov8x-1280",
-    video_reference="https://media.roboflow.com/inference/people-walking.mp4",
-    on_prediction=render_boxes
-)
-
-pipeline.start()
-pipeline.join()
-```
-
-## 🔑 keys
-
-Inference enables the deployment of a wide range of pre-trained and foundational models without an API key. To access thousands of fine-tuned models shared by the [Roboflow Universe](https://universe.roboflow.com/) community, [configure your](https://app.roboflow.com/settings/api) API key.
-
-```bash
-export ROBOFLOW_API_KEY=<your_api_key>
-```
-
-## 📚 documentation
-
-Visit our [documentation](/) to explore comprehensive guides, detailed API references, and a wide array of tutorials designed to help you harness the full potential of the Inference package.
-
-## © license
-
-The Roboflow Inference code is distributed under the [Apache 2.0](https://github.com/roboflow/inference/blob/master/LICENSE.core) license. However, each supported model is subject to its licensing. Detailed information on each model's license can be found [here](https://roboflow.com/licensing).
-
-## ⚡️ extras
-
-Below you can find the list of extras available for `inference` and `inference-gpu`:
-
-| Name             | Description                                  | Notes |
-|:-----------------|:---------------------------------------------|:------|
-| `clip`           | CLIP model                                   | N/A   |
-| `gaze`           | L2CS-Net model                               | N/A   |
-| `grounding-dino` | Grounding DINO model                         | N/A   |
-| `sam`            | SAM and SAM2 models                          | The extras depend on `rasterio`, which requires the GDAL library to work. If the installation fails with a `gdal-config` command error, run `sudo apt-get install libgdal-dev` on Linux or follow the official installation guide. |
-| `yolo-world`     | YOLO-World model                             | N/A   |
-| `transformers`   | `transformers`-based models, like Florence-2 | N/A   |
-
-??? note "Installing extras"
-
-    To install specific extras, you need to run
-
-    ```bash
-    pip install inference[extras-name]
-    ```
-
-    or
-
-    ```bash
-    pip install inference-gpu[extras-name]
-    ```
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/styles.css b/docs/styles.css
index e0c1d42cbc..1090ae1b64 100644
--- a/docs/styles.css
+++ b/docs/styles.css
@@ -18,6 +18,10 @@
   justify-content: space-between;
 }
 
+.grecaptcha-badge {
+  box-shadow: none !important;
+}
+
 @media screen and (min-width: 76.25em) {
   .md-nav__item--section>.md-nav__link {
     font-weight: 700;
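Since the new copy centers on Workflows, a minimal sketch of running one end to end with the Inference SDK client may help reviewers. The workspace and workflow identifiers are placeholders, and the exact `run_workflow` parameters should be checked against the installed `inference_sdk`:

```python
from inference_sdk import InferenceHTTPClient

client = InferenceHTTPClient(
    api_url="http://localhost:9001",  # or https://detect.roboflow.com for the hosted API
    api_key="<ROBOFLOW_API_KEY>",
)

# "my-workspace" and "my-workflow" are placeholders for your own Workflow.
result = client.run_workflow(
    workspace_name="my-workspace",
    workflow_id="my-workflow",
    images={"image": "https://media.roboflow.com/inference/people-walking.jpg"},
)
print(result)
```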
From e64d345a261a2ebd6b2b82e5ec63181d1232b541 Mon Sep 17 00:00:00 2001
From: Brad Dwyer
Date: Mon, 6 Jan 2025 13:22:57 -0600
Subject: [PATCH 3/4] Add video tutorials

---
 docs/index.md | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/docs/index.md b/docs/index.md
index a59ed4b287..3a61abf4a8 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -32,7 +32,53 @@ See [Example Workflows](https://roboflow.com/workflows/templates) for common use
 
 Build a visual agent with Workflows
 
+## Video Tutorials
+
+[Video] Smart Parking with AI
+
+**Tutorial: Build a Smart Parking System**
+
+*Created: 27 Nov 2024*
+
+Build a smart parking lot management system using Roboflow Workflows!
+This tutorial covers license plate detection with YOLOv8, object tracking
+with ByteTrack, and real-time notifications with a Telegram bot.
+
+[Video] Workflows Tutorial
+
+**Tutorial: Build a Traffic Monitoring Application with Workflows**
+
+*Created: 22 Oct 2024*
+
+Learn how to build and deploy Workflows for common use-cases like detecting
+vehicles, filtering detections, visualizing results, and calculating dwell
+time on a live video stream.
\ No newline at end of file

From 7e7a6816b447210a690768fef7a4f9807bf1 Mon Sep 17 00:00:00 2001
From: Brad Dwyer
Date: Mon, 6 Jan 2025 13:30:50 -0600
Subject: [PATCH 4/4] Update link

---
 README.md     | 2 +-
 docs/index.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index de5ddfbd13..15fe26efac 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ Inference turns any computer or edge device into a command center for your computer vision projects.
 * 🔗 [Extend](https://inference.roboflow.com/workflows/create_workflow_block/) with your own code and models
 * 🚀 Deploy production systems at scale
 
-See [Example Workflows](https://roboflow.com/workflows/templates) for common use-cases like detecting small objects with SAHI, multi-model consensus, active learning, reading license plates, blurring faces, background removal, and more.
+See [Example Workflows](https://inference.roboflow.com/workflows/gallery/) for common use-cases like detecting small objects with SAHI, multi-model consensus, active learning, reading license plates, blurring faces, background removal, and more.
 
 [Time In Zone Workflow Example](https://github.com/user-attachments/assets/743233d9-3460-442d-83f8-20e29e76b346)
diff --git a/docs/index.md b/docs/index.md
index 3a61abf4a8..e7793c1337 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -24,7 +24,7 @@ Inference turns any computer or edge device into a command center for your computer vision projects.
 * 🔗 [Extend](/workflows/create_workflow_block.md) with your own code and models
 * 🚀 Deploy production systems at scale
 
-See [Example Workflows](https://roboflow.com/workflows/templates) for common use-cases like detecting small objects with SAHI, multi-model consensus, active learning, reading license plates, blurring faces, background removal, and more.
+See [Example Workflows](/workflows/gallery/index.md) for common use-cases like detecting small objects with SAHI, multi-model consensus, active learning, reading license plates, blurring faces, background removal, and more.
 
 Get started with our "Run your first model" guide
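The traffic-monitoring tutorial added in patch 3 follows the same `InferencePipeline` pattern the old landing page documented. A minimal sketch adapted for a live stream, with a placeholder RTSP URL:

```python
from inference import InferencePipeline
from inference.core.interfaces.stream.sinks import render_boxes

# Placeholder stream; any video file path or stream URL works as
# video_reference.
pipeline = InferencePipeline.init(
    model_id="yolov8x-1280",
    video_reference="rtsp://example.com/traffic-cam",
    on_prediction=render_boxes,  # draw predictions on screen; swap in your own sink
)

pipeline.start()
pipeline.join()
```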