From fc91891d767f83dceab6f4522e0fb6c3f232bd20 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 31 Dec 2025 05:54:38 -0700 Subject: [PATCH 01/17] Correctly set query padding --- frigate/api/media.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/api/media.py b/frigate/api/media.py index 783b42e97..971bfef83 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -1935,7 +1935,7 @@ async def label_clip(request: Request, camera_name: str, label: str): try: event = event_query.get() - return await event_clip(request, event.id) + return await event_clip(request, event.id, 0) except DoesNotExist: return JSONResponse( content={"success": False, "message": "Event not found"}, status_code=404 From b268ded5a27115729d46d2a559e234f065462799 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 31 Dec 2025 06:41:38 -0700 Subject: [PATCH 02/17] Adjust AMD headers and add community badge --- docs/docs/configuration/hardware_acceleration_video.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md index f7368e623..03ab33100 100644 --- a/docs/docs/configuration/hardware_acceleration_video.md +++ b/docs/docs/configuration/hardware_acceleration_video.md @@ -31,11 +31,11 @@ Frigate supports presets for optimal hardware accelerated video decoding: - [Raspberry Pi](#raspberry-pi-34): Frigate can utilize the media engine in the Raspberry Pi 3 and 4 to slightly accelerate video decoding. -**Nvidia Jetson** +**Nvidia Jetson** - [Jetson](#nvidia-jetson): Frigate can utilize the media engine in Jetson hardware to accelerate video decoding. -**Rockchip** +**Rockchip** - [RKNN](#rockchip-platform): Frigate can utilize the media engine in RockChip SOCs to accelerate video decoding. 
@@ -184,11 +184,11 @@ If you are passing in a device path, make sure you've passed the device through Frigate can utilize modern AMD integrated GPUs and AMD GPUs to accelerate video decoding using VAAPI. -:::note +### Configuring Radeon Driver You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars). -::: +### Via VAAPI VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. From 33dd1703842566d4f2f0da48fd1bd20c43d84e6b Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 31 Dec 2025 06:47:38 -0700 Subject: [PATCH 03/17] Simplify getting started guide for camera wizard --- docs/docs/guides/getting_started.md | 28 +++++----------------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index 3b07d8d5b..8c90a6f33 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -134,31 +134,13 @@ Now you should be able to start Frigate by running `docker compose up -d` from w This section assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution. -### Step 1: Add a detect stream +### Step 1: Start Frigate -First we will add the detect stream for the camera: +At this point you should be able to start Frigate and a basic config will be created automatically. 
-```yaml -mqtt: - enabled: False +### Step 2: Add a camera -cameras: - name_of_your_camera: # <------ Name the camera - enabled: True - ffmpeg: - inputs: - - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection - roles: - - detect -``` - -### Step 2: Start Frigate - -At this point you should be able to start Frigate and see the video feed in the UI. - -If you get an error image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with H264 RTSP cameras that support TCP connections. - -FFmpeg arguments for other types of cameras can be found [here](../configuration/camera_specific.md). +You can click the `Add Camera` button to use the camera setup wizard to get your first camera added into Frigate. ### Step 3: Configure hardware acceleration (recommended) @@ -173,7 +155,7 @@ services: frigate: ... devices: - - /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware + - /dev/dri/renderD128:/dev/dri/renderD128 # for intel & amd hwaccel, needs to be updated for your hardware ... ``` From 3d5a02973be3ae2ab316c051e66729c4b78e5066 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 13:36:09 -0600 Subject: [PATCH 04/17] add optimizing performance guide --- docs/docs/frigate/index.md | 2 +- docs/docs/frigate/optimizing_performance.md | 1271 +++++++++++++++++++ docs/sidebars.ts | 1 + 3 files changed, 1273 insertions(+), 1 deletion(-) create mode 100644 docs/docs/frigate/optimizing_performance.md diff --git a/docs/docs/frigate/index.md b/docs/docs/frigate/index.md index 83162022c..fa4e8d4f5 100644 --- a/docs/docs/frigate/index.md +++ b/docs/docs/frigate/index.md @@ -6,7 +6,7 @@ slug: / A complete and local NVR designed for Home Assistant with AI object detection. 
Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. -Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes. +Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes. See the [Optimizing Performance Guide](/frigate/optimizing_performance) for tips on getting the most out of your hardware. - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration) - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md new file mode 100644 index 000000000..04a89900d --- /dev/null +++ b/docs/docs/frigate/optimizing_performance.md @@ -0,0 +1,1271 @@ +--- +id: optimizing_performance +title: Optimizing Performance +--- + +# Optimizing Performance + +Optimizing Frigate's performance is key to ensuring a responsive system and minimizing resource usage. This guide covers the most impactful configuration changes you can make to improve efficiency. + +## 1. Hardware Acceleration for Video Decoding + +**Priority: Critical** + +Video decompression is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams, as described in the [getting started guide](../guides/getting_started). Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware. + +### Key Concepts + +**Resolution & FPS Impact:** The decoding burden grows exponentially with resolution and frame rate. 
A 4K stream at 30 FPS requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras. + +**Hardware Acceleration Benefits:** By using dedicated video decode hardware (Intel QuickSync, NVIDIA NVDEC, AMD VCE, or VA-API), you can: + +- Reduce CPU usage by 50-80% per camera stream +- Support 2-3x more cameras on the same hardware +- Free up CPU resources for motion detection and other Frigate processes +- Reduce system heat and power consumption + +### Configuration + +Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference) as described in the [getting started guide](../guides/getting_started). + +### Troubleshooting Hardware Acceleration + +If hardware acceleration isn't working: + +1. Check Frigate logs for FFmpeg errors related to hwaccel +2. Verify the hardware device is accessible inside the container +3. Ensure your camera streams use H.264 or H.265 codecs (most common) +4. Try different presets if the automatic detection fails +5. Check that your GPU drivers are properly installed on the host system + +## 2. Detector Selection and Configuration + +**Priority: Critical** + +Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [detector documentation](../configuration/object_detectors). 

### Understanding Detector Performance

Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle.

**Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100). If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind.

### Choosing the Right Detector

Different detectors have vastly different performance characteristics, as shown in the [detector documentation](../configuration/object_detectors):

**OpenVINO on Intel:**

- Inference speed: 10-35ms depending on iGPU/GPU
- Best for: Systems with Intel CPUs (6th gen+) or Arc GPUs
- Requires: Intel CPU with integrated graphics or discrete Intel GPU

**Hailo-8/8L:**

- Inference speed: 7-11ms for yolov6n
- Best for: Newer installations seeking alternatives to Coral
- Requires: M.2 Hailo device

**NVIDIA TensorRT:**

- Inference speed: 8-17ms depending on GPU and model
- Best for: Systems with NVIDIA GPUs, especially for many cameras
- Requires: NVIDIA GPU with CUDA support

**AMD ROCm:**

- Inference speed: 14-50ms depending on model size
- Best for: Systems with AMD discrete GPUs
- Requires: Supported AMD GPU, use -rocm Frigate image

**CPU Detector (Not Recommended):**

- Inference speed: 50-300ms depending on CPU
- Best for: Testing only, not production use
- Note: Even low-end dedicated detectors vastly outperform CPU detection

### Multiple Detector Instances

When a single detector cannot 
keep up with your camera count, some detector types (`openvino`, `onnx`) allow you to define multiple detector instances to share the workload. This is particularly useful with GPU-based detectors that have sufficient VRAM to run multiple inference processes: + +```yaml +detectors: + ov_0: + type: openvino + device: GPU + ov_1: + type: openvino + device: GPU +``` + +**When to add a second detector:** + +- Your detection_fps consistently falls below your configured detect fps +- The detector process shows 100% utilization in System Metrics +- You're experiencing delays in object detection appearing in the UI +- You see "detection appears to be stuck" warnings in logs + +### Model Selection and Optimization + +The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors). + +**Model Size Trade-offs:** + +- Smaller models (320x320): Faster inference, Frigate is specifically optimized for a 320x320 size model. +- Larger models (640x640): Slower inference, can often times have better accuracy depending on the camera frame and objects you are trying to detect, but also prevents Frigate from zooming in as much on the frame. + +## 3. Camera Stream Configuration + +**Priority: High** + +Properly configuring your camera streams is essential for optimal performance. Frigate supports multiple input streams per camera, allowing you to use different resolutions and frame rates for different purposes, as explained in the [camera documentation](../configuration/cameras). + +### Detect Stream Resolution + +Your `detect` stream resolution should be just high enough to reliably identify the objects you care about, but no higher. 
Higher resolutions dramatically increase both CPU (decoding) and detector (inference) workload, as noted in the [getting started guide](../guides/getting_started) and [camera documentation](../configuration/cameras). + +**Resolution Guidelines:** + +- **1280x720 (720p):** Recommended starting point for most installations in the [getting started guide](../guides/getting_started). +- **640x480 (VGA):** Suitable for close-range detection (doorways, small rooms) +- **1920x1080 (1080p):** Only if detecting small objects at distance +- **Avoid 4K for detection:** Rarely necessary and extremely resource-intensive + +**Important:** Frigate will try to automatically detect the stream resolution if not specified, but it is recommended to explicitly set it in the [getting started guide](../guides/getting_started) to ensure consistency and help with troubleshooting. + +### Frame Rate Optimization + +A higher frame rate for detection increases CPU and detector load without always improving accuracy. Most motion happens over multiple frames, so 5 FPS is typically sufficient for reliable object detection, as described in the [configuration reference](../configuration/reference) and [motion detection documentation](../configuration/motion_detection). + +**Recommended Configuration:** + +```yaml +detect: + fps: 5 +``` + +**Frame Rate Guidelines:** + +- **5 FPS:** Ideal for most use cases, recommended default in the [getting started guide](../guides/getting_started). +- **3-4 FPS:** Acceptable for stationary camera monitoring slow-moving objects +- **10 FPS:** Rarely beneficial, significantly increases resource usage. Only recommended for users with dedicated LPR cameras where the plate crosses the full frame very quickly. +- **Over 10 FPS**: Significantly increases resource usage for no benefit. 
+- **Reduce at camera level:** If possible, configure your camera to output 5 FPS directly in firmware to save bandwidth and decoding cycles + +**Why 5 FPS works:** Frigate uses motion detection to decide when to run object detection. At 5 FPS, there's only 200ms between frames, which is fast enough to catch any significant motion while using 1/6th the resources of 30 FPS, as explained in the [motion detection documentation](../configuration/motion_detection). + +### Separate Streams for Detect vs. Record + +One of the most impactful optimizations is using separate streams for detection and recording. This allows you to use a low-resolution sub-stream for efficient detection while recording high-quality footage from the main stream, as recommended in the [getting started guide](../guides/getting_started) and [camera documentation](../configuration/cameras). + +**Benefits of separate streams:** + +- Detect stream can be 720p @ 5 FPS for efficiency +- Record stream can be 4K @ 15-30 FPS for quality +- Reduces detector workload by 75% or more +- Maintains high-quality recordings for evidence +- Allows independent optimization of each stream + +**Single Stream Configuration:** If you must use a single stream, Frigate will automatically assign the `detect` role, as described in the [camera documentation](../configuration/cameras). + +However, this is less efficient as the same high-resolution stream must be decoded for both purposes. + +## 4. Motion Detection Tuning + +**Priority: High** + +Motion detection acts as the critical first-line gatekeeper for object detection in Frigate. It determines when and where to run expensive AI inference, as explained in the [motion detection documentation](../configuration/motion_detection). Properly tuned motion detection ensures your detector only analyzes relevant areas, dramatically improving system efficiency. 
+ +### Understanding Motion Detection's Role + +Frigate uses motion detection to identify areas of the frame worth checking with object detection: + +1. **Motion Detection:** Analyzes pixel changes between frames to detect movement +2. **Motion Boxes:** Groups nearby motion into rectangles (red boxes in debug view) +3. **Region Creation:** Creates square regions around motion areas (green boxes) +4. **Object Detection:** Runs AI inference only on these regions + +**Why this matters:** Without motion detection, Frigate would need to run object detection on every frame of every camera continuously, which would be computationally impossible for most systems. Motion detection reduces detector workload by 90-95% in typical scenarios. + +### Motion Masks + +Motion masks prevent specific areas from triggering object detection. This is one of the most impactful optimizations you can make, as described in the [mask documentation](../configuration/masks) and [motion detection documentation](../configuration/motion_detection). + +**What to mask:** + +- Camera timestamps and on-screen displays +- Trees, bushes, and vegetation that move in wind +- Flags, banners, or other constantly moving objects +- Sky and clouds +- Rooftops and distant background areas +- Reflective surfaces with changing light + +**What NOT to mask:** + +- Areas where you want to detect objects, even if you don't want alerts there +- Paths objects might take to reach areas of interest +- Transition zones between monitored areas, as noted in the [mask documentation](../configuration/masks). + +**Critical distinction - Motion masks vs. Object filter masks:** Motion masks only prevent motion from triggering detection. They do NOT prevent objects from being detected if object detection was started due to motion in unmasked areas. If you want to filter out false positive detections in specific locations, use object filter masks instead (covered in Section 5). 
+ +**Over-masking warning:** Be cautious about masking too much. Motion is used during object tracking to refine the object detection area in the next frame. Over-masking will make it more difficult for objects to be tracked. If an object walks from an unmasked area into a fully masked area, they essentially disappear and will be picked up as a "new" object if they leave the masked area. + +### Motion Sensitivity Parameters + +Frigate provides several parameters to fine-tune motion detection sensitivity. These work together to determine what constitutes "motion" worthy of triggering object detection. + +**threshold:** The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion, as described in the [configuration reference](../configuration/reference). + +```yaml +motion: + threshold: 30 +``` + +- **Range:** 1-255 +- **Default:** 30 +- **Higher values:** Make motion detection less sensitive (fewer false triggers) +- **Lower values:** Make motion detection more sensitive (may trigger on minor changes) +- **When to increase:** Cameras with noisy sensors, busy backgrounds, or frequent lighting changes +- **When to decrease:** Missing slow-moving objects or subtle motion + +**contour_area:** Minimum size in pixels in the resized motion image that counts as motion. 

```yaml
motion:
  contour_area: 10
```

- **Default:** 10
- **High sensitivity:** 10 (detects small movements like insects, leaves)
- **Medium sensitivity:** 30 (typical people and vehicle motion)
- **Low sensitivity:** 50 (only larger movements)
- **When to increase:** Reduce false triggers from small movements like insects or debris
- **When to decrease:** Ensure detection of small objects or distant motion

### Improve Contrast

The improve_contrast setting enables dynamic contrast improvement to help with challenging lighting conditions:

```yaml
motion:
  improve_contrast: True
```

**Benefits:**

- Helps improve night detections in low-light conditions
- Makes subtle motion more visible to the motion detection algorithm
- Particularly useful for cameras without good low-light performance

**Trade-offs:**

- May increase motion sensitivity during daytime
- Can cause more false triggers in high-contrast scenes
- Slightly increases CPU usage for motion processing

**When to enable:**

- Cameras struggling with night detection
- Low-quality cameras with poor dynamic range
- Scenes with challenging lighting (backlighting, deep shadows)

**When to disable:**

- Already experiencing too many false motion triggers
- High-quality cameras with good low-light performance
- CPU usage is a concern

### Debugging Motion Detection

To visualize and tune motion detection:

1. Navigate to your camera in the Frigate UI
2. Select "Debug" from the settings cog
3. Enable "Motion boxes" in the options
4. Watch for red boxes showing detected motion areas

**What to look for:**

- **Constant motion boxes:** Indicates areas that should be masked
- **No motion boxes when objects move:** Motion detection may be too insensitive
- **Motion boxes everywhere:** Motion detection is too sensitive
- **Boxes not covering moving objects:** May need to adjust contour_area or threshold

**Iterative tuning process:**

1. 
Start with default settings +2. Identify problem areas using debug view +3. Add motion masks for constantly triggering areas +4. Adjust sensitivity parameters if needed +5. Test with real-world scenarios +6. Repeat until optimized + +## 5. Object Detection Optimization + +**Priority: Medium-High** + +Once motion detection has identified areas worth analyzing, object detection determines what's actually in those regions. Proper configuration of object detection filters and tracking parameters ensures Frigate only tracks relevant objects and ignores false positives. + +### Object Filters + +Object filters allow you to ignore detections based on size, shape, confidence score, and location. These filters run after the AI model has identified an object, preventing Frigate from wasting resources tracking objects you don't care about. + +**Basic filter configuration:** + +```yaml +objects: + track: + - person + filters: + person: + min_area: 5000 + max_area: 100000 + min_score: 0.5 + threshold: 0.7 +``` + +**min_area:** Minimum size of the bounding box for the detected object. + +- **Default:** 0 (no minimum) +- **Can be specified:** As pixels (width × height) or as a decimal percentage of frame (0.000001 to 0.99) +- **Example:** 5000 pixels filters out very small detections +- **Use case:** Ignore distant objects, insects, or detection artifacts +- **Tuning tip:** Check the bounding box size in the UI for objects you want to track, then set min_area slightly below that + +**max_area:** Maximum size of the bounding box for the detected object. 
+ +- **Default:** 24000000 (essentially unlimited) +- **Can be specified:** As pixels or as a decimal percentage of frame +- **Example:** 100000 pixels prevents tracking objects that fill most of the frame +- **Use case:** Filter out false positives from camera adjustments, lens flares, or objects too close to camera +- **Tuning tip:** Objects filling >50% of frame are often false positives or not useful to track + +**min_ratio:** Minimum width/height ratio of the bounding box. + +```yaml +objects: + filters: + person: + min_ratio: 0.5 +``` + +- **Default:** 0 (no minimum) +- **Purpose:** Filter objects based on aspect ratio +- **Example:** 0.5 means width must be at least half the height +- **Use case:** People are typically taller than wide; very wide detections are likely false positives +- **Tuning tip:** Calculate ratio of typical objects (person standing ≈ 0.3-0.6, car ≈ 1.5-3.0) + +**max_ratio:** Maximum width/height ratio of the bounding box. + +```yaml +objects: + filters: + person: + max_ratio: 2.0 +``` + +- **Default:** 24000000 (essentially unlimited) +- **Example:** 2.0 means width can't exceed 2× the height +- **Use case:** Filter out horizontally stretched false positives +- **Tuning tip:** Adjust based on expected object orientations in your scene + +**min_score:** Minimum score for the object to initiate tracking. + +```yaml +objects: + filters: + person: + min_score: 0.5 +``` + +- **Default:** 0.5 (50% confidence) +- **Purpose:** Initial confidence threshold to start tracking an object +- **Range:** 0.0 to 1.0 +- **Higher values:** Fewer false positives, but may miss valid detections +- **Lower values:** More detections captured, but more false positives +- **Tuning tip:** Start at 0.5, increase if too many false positives + +**threshold:** Minimum decimal percentage for tracked object's computed score to be considered a true positive. 
+ +```yaml +objects: + filters: + person: + threshold: 0.7 +``` + +- **Default:** 0.7 (70% confidence) +- **Purpose:** Final threshold for an object to be saved/alerted +- **How it works:** Frigate tracks objects over multiple frames and calculates a median score; this must exceed the threshold +- **Difference from min_score:** min_score starts tracking, threshold confirms it's real +- **Minimum frames:** Takes at least 3 frames for Frigate to determine if an object meets the threshold +- **Tuning tip:** This is your primary tool for reducing false positives without missing detections + +### Object Filter Masks + +Object filter masks are different from motion masks. They filter out false positives for specific object types based on location, evaluated at the bottom center of the detected object's bounding box, as described in the [mask documentation](../configuration/masks). + +See the [mask documentation](../configuration/masks) and [configuration reference](../configuration/reference) for more details. + +**Global object mask:** The mask under the `objects` section applies to all tracked object types and is combined with object-specific masks. + +**Object-specific masks:** Masks under specific object types (like `person`) only apply to that object type. + +**Use cases:** + +- Remove false positives in fixed locations (like a tree base frequently detected as a person) +- Prevent animals from being detected in areas they can't physically access + +**How it works:** The bottom center of the bounding box is evaluated against the mask. If it falls in a masked area, the detection is assumed to be a false positive and ignored, as explained in the [mask documentation](../configuration/masks). + +**Creating object filter masks:** Use the same mask creator tool in the UI (Settings → Mask / zone editor), but select "Object mask" instead of "Motion mask." + +**Example scenario:** A tree base is frequently detected as a person. 
Use the Frigate UI to create a precise object filter mask over the location where the bottom center of the false detection typically appears. + +## 6. Recording Configuration + +**Priority: Medium** + +Recording configuration impacts both storage requirements and system performance. Properly configured recording ensures you capture the footage you need without wasting disk space or CPU cycles. + +### Retention Modes + +Frigate offers three retention modes that determine which recording segments are kept. Each mode has different storage and CPU implications: + +**all:** Save all recording segments regardless of activity. + +```yaml +record: + retain: + days: 7 + mode: all +``` + +- **Storage impact:** Highest - stores everything continuously +- **CPU impact:** Moderate - still needs to manage and clean up old segments +- **Use case:** Critical areas requiring complete coverage, legal requirements +- **Disk usage:** ~1-3 GB per camera per day (depending on resolution/bitrate) + +**motion:** Save all recording segments with any detected motion. + +```yaml +record: + retain: + days: 7 + mode: motion +``` + +- **Storage impact:** Medium - only stores when motion detected +- **CPU impact:** Low - fewer segments to manage +- **Use case:** Most common configuration, balances coverage with storage +- **Disk usage:** ~200-800 MB per camera per day (depending on activity) +- **Note:** Motion detection must be enabled for this mode to work + +**active_objects:** Save all recording segments with active/moving objects. 
+ +```yaml +record: + retain: + days: 7 + mode: active_objects +``` + +- **Storage impact:** Lowest - only stores when tracked objects are moving +- **CPU impact:** Lowest - minimal segments to manage +- **Use case:** Storage-constrained installations, focus on object events only +- **Disk usage:** ~50-300 MB per camera per day (depending on object activity) +- **Note:** Most efficient option but may miss motion-only events + +**Important consideration:** If the retain mode for the camera is more restrictive than modes configured for alerts/detections, the segments will already be gone by the time those modes are applied. For example, if the camera retain mode is "motion", segments without motion are never stored, so setting alerts mode to "all" won't bring them back. + +### Recording Enable/Disable + +Recording must be enabled in the configuration to function: + +```yaml +record: + enabled: False +``` + +- **Default:** False (disabled) +- **Critical warning:** If recording is disabled in the config, turning it on via the UI or MQTT later will have no effect +- **Must be set to True:** To enable recording functionality +- **Can be overridden:** At the camera level for selective recording + +**Camera-level override:** + +```yaml +cameras: + front_door: + record: + enabled: True + retain: + days: 30 +``` + +## 7. System-Level Optimizations + +**Priority: Medium** + +Beyond camera and detection configuration, several system-level settings can significantly impact Frigate's performance and reliability. + +### Shared Memory (shm-size) + +Frigate uses shared memory to pass frames between processes efficiently. Insufficient shared memory will cause Frigate to crash with bus errors. 
+ +**Docker Compose configuration:** + +```yaml +services: + frigate: + shm_size: "256mb" +``` + +**General guidelines:** + +- **Minimum:** 128 MB for single camera installations +- **Typical:** 256 MB for 2-5 cameras +- **Large installations:** 512 MB - 1 GB for 10+ cameras or 4K streams +- **Symptoms of insufficient shm:** Bus error crashes, "Failed to create shared memory" errors + +The Frigate UI and logs will warn you if your `shm_size` is too low. + +### Tmpfs for Cache + +Using a tmpfs (RAM disk) for Frigate's cache reduces disk wear and improves performance. + +**Configuration:** + +```yaml +volumes: + - type: tmpfs + target: /tmp/cache + tmpfs: + size: 1000000000 # 1 GB +``` + +**Benefits:** + +- **Faster I/O:** RAM is orders of magnitude faster than disk +- **Reduced disk wear:** Eliminates constant write cycles to storage media +- **Lower latency:** Improves responsiveness for live viewing and clip generation +- **Extended hardware life:** Critical for SD cards and extends SSD lifespan + +**Size recommendations:** + +- **Minimum:** 500 MB for very small installations +- **Typical:** 1-2 GB for most installations +- **Large installations:** 4-8 GB for many cameras or high-resolution streams +- **Calculation:** Roughly 200-500 MB per camera depending on resolution + +**Trade-offs:** + +- **RAM usage:** Reduces available system RAM +- **Volatility:** Cache is lost on restart (not an issue for temporary cache) +- **System requirements:** Ensure sufficient RAM for OS, Frigate, and tmpfs + +## 8. Understanding Resource Usage + +**Priority: Educational** + +Understanding how Frigate uses system resources helps you identify bottlenecks and optimize configuration effectively. 
+ +### CPU Usage Breakdown + +Frigate's CPU usage comes from several distinct processes: + +**Video Decoding:** + +- **Primary CPU consumer** in most installations +- Decompresses video streams from cameras +- Scales with: resolution, frame rate, number of cameras, codec complexity +- **Mitigation:** Hardware acceleration (see Section 1) + +**Motion Detection:** + +- Analyzes pixel changes between frames +- Runs on every frame of every camera +- Scales with: detect resolution, frame rate, motion sensitivity settings +- **Mitigation:** Lower detect resolution, reduce FPS, use motion masks + +**Detector CPU**: + +- Prepares model inputs for inference on your object detection hardware +- Runs when motion is detected +- Scales with: frame rate, number of cameras, complexity of the object detection model +- **Mitigation**: Reduce motion sensitivity and detect fps, switch to a less complex model, add a second detector instance (`openvino` or `onnx`) + +**FFmpeg Process Management:** + +- Manages camera connections and stream handling +- Generally low overhead +- Can spike during reconnection attempts +- **Mitigation:** Stable network + +**Database Operations:** + +- Event storage and retrieval +- Generally minimal impact +- Can increase with very high event rates +- **Mitigation:** Proper retention settings, SSD storage + +**Enrichments**: + +- Semantic Search, Face recognition, LPR, custom classification +- Semantic Search generates embeddings for tracked object thumbnails. If run on the CPU, this may significantly drive up CPU usage +- Face recognition, LPR, and custom classification are usually lightweight, but if many faces, plates, or objects are detected, this can slow the pipeline +- **Mitigation**: Run Semantic Search on a GPU and/or disable other enrichments + +**GenAI**: + +- Generative AI for tracked object and review item descriptions +- Local (Ollama) and cloud providers are available. 
Using a local provider without a powerful GPU will significantly increase resource usage
+- **Mitigation**: Offload to a cloud provider or use a powerful GPU for inference
+
+**Web Interface:**
+
+- Serving live streams and UI
+- Increases with number of concurrent viewers
+- WebRTC/MSE streaming via go2rtc
+- **Mitigation:** Limit concurrent viewers, use sub-streams for viewing
+
+### Resolution Impact
+
+Resolution affects both CPU and detector workload:
+
+**CPU (Decoding) Impact:**
+
+- **720p (1280×720):** Baseline reference point
+- **1080p (1920×1080):** ~2.25× the pixels, roughly 2× the CPU load
+- **4K (3840×2160):** ~9× the pixels of 720p, roughly 6-8× the CPU load
+- **Why not linear:** Codec overhead, memory bandwidth, and cache efficiency
+
+**Example comparison:**
+
+- Decoding 720p stream: 15% CPU per camera
+- Decoding 1080p stream: 30-35% CPU per camera
+- Decoding 4K stream: 90-120% CPU per camera (may require multiple cores)
+
+**Detector (Inference) Impact:**
+
+The detector must analyze more pixels, but the impact depends on your model:
+
+- **Fixed input models (320×320, 640×640):** Frigate resizes the frame to match model input, so resolution has minimal direct impact on inference time
+- **Indirect impact:** Higher resolution means larger motion regions, potentially more detection runs
+- **Region extraction:** Larger source frames require more CPU to crop detection regions
+
+**Practical implications:**
+
+- Use lowest resolution that meets detection needs for detect stream
+- Save high resolution for record stream only
+- 720p is the sweet spot for most detection scenarios
+- 1080p only if detecting small/distant objects
+- 4K almost never necessary for detection
+
+### Memory Usage
+
+Frigate's memory usage comes from several sources:
+
+**Frame Buffers:**
+
+- Raw decoded frames stored in shared memory
+- Size: resolution × color depth × number of buffered frames
+- Example: 1920×1080×3 bytes × 10 frames = ~62 MB per camera
+
+**Object 
Tracking:** + +- Metadata for each tracked object +- Minimal per object (~1-2 KB) +- Scales with number of simultaneous tracked objects + +**Database Cache:** + +- SQLite database held in memory for performance +- Typically 50-200 MB depending on history + +**Python Process:** + +- Frigate application overhead +- Generally 200-500 MB base usage +- Increases slightly with number of cameras + +**Detector Model:** + +- AI model loaded into detector memory +- OpenVINO/ONNX: 50-200 MB depending on model +- GPU VRAM: 500 MB - 2 GB depending on model and batch size + +**Enrichments:** + +- Semantic Search, LPR, Face Recognition, and custom classification models loaded into memory +- Depending on the enrichment, these models can be quite large. Semantic Search: ~4-6GB, Face Recognition, LPR, custom classification: ~500MB-1GB + +**Total memory recommendations:** + +- **Minimal (1-2 cameras):** 2 GB RAM +- **Small (3-5 cameras):** 4 GB RAM +- **Medium (6-10 cameras):** 8 GB RAM +- **Large (10+ cameras):** 16 GB+ RAM + +### Storage Usage + +Storage requirements vary dramatically based on configuration: + +**Recording storage by mode:** + +**Mode: all (continuous recording)** + +- 1080p @ 15 FPS, 2 Mbps: ~21 GB per camera per day +- 1080p @ 30 FPS, 4 Mbps: ~43 GB per camera per day +- 4K @ 15 FPS, 8 Mbps: ~86 GB per camera per day +- 4K @ 30 FPS, 16 Mbps: ~173 GB per camera per day + +**Mode: motion** + +- Depends heavily on activity level +- Low activity (suburban home): 3–8 GB per camera per day +- Medium activity (urban home): 8–20 GB per camera per day +- High activity (retail, busy street): 20–50 GB per camera per day + +**Mode: active_objects** + +- Most efficient, only records when objects detected +- Low activity: 1–3 GB per camera per day +- Medium activity: 3–8 GB per camera per day +- High activity: 8–15 GB per camera per day + +**Database storage:** + +- Grows with number of events +- Typical: 100–300 MB for months of events +- Cleaned automatically based on 
retention settings + +**Clips and snapshots:** + +- Stored separately from recordings +- Size depends on alert/detection frequency +- Typical: 100–500 MB per camera per month + +**Storage planning example:** + +``` +5 cameras, 1080p recording, motion mode, 14 day retention: +5 cameras × 10 GB/day × 14 days = 700 GB minimum +Add 30% buffer: ~900 GB recommended +``` + +### Network Bandwidth + +Network bandwidth impacts both camera-to-Frigate and Frigate-to-client connections: + +**Camera to Frigate (inbound):** + +- Depends on camera bitrate and number of streams +- 1080p @ 2 Mbps: 2 Mbps per stream +- If using separate detect and record streams: sum both streams +- Example: 5 cameras × (2 Mbps detect + 4 Mbps record) = 30 Mbps total + +**Frigate to Clients (outbound):** + +- Live viewing via WebRTC/MSE through go2rtc +- Each viewer consumes full stream bandwidth +- Example: 3 viewers watching 1080p @ 4 Mbps = 12 Mbps outbound +- Mitigation: Use lower bitrate sub-streams for live viewing + +**MQTT traffic:** + +- Minimal, typically <1 Mbps even with many cameras +- Event notifications and state updates + +**API traffic:** + +- Varies with UI usage and integrations +- Generally negligible compared to video streams + +### Identifying Bottlenecks + +**Symptoms and causes:** + +**High CPU usage:** + +- **Cause:** Video decoding without hardware acceleration +- **Solution:** Enable hwaccel_args (Section 1) +- **Cause:** High detect resolution or FPS +- **Solution:** Lower detect resolution/FPS (Section 3) + +**Detector CPU Usage at 100%:** + +- **Cause:** Too many cameras or too much motion +- **Solution:** Add second detector instance (Section 2) +- **Cause:** Model too complex for hardware +- **Solution:** Use smaller/faster model + +**Detection FPS below configured FPS:** + +- **Cause:** System can't keep up with configured rate +- **Solution:** Reduce detect FPS, add hardware acceleration, add detector +- **Cause:** Excessive motion triggering constant detection +- 
**Solution:** Add motion masks (Section 4) + +**Recording gaps or stuttering:** + +- **Cause:** Insufficient disk I/O performance +- **Solution:** Use faster storage (SSD), reduce recording resolution +- **Cause:** Network issues with camera +- **Solution:** Check network stability + +**Out of memory crashes:** + +- **Cause:** Insufficient shared memory +- **Solution:** Increase shm_size (Section 7) +- **Cause:** Too many simultaneous high-resolution streams +- **Solution:** Reduce number of cameras or resolution + +**Slow UI/high latency:** + +- **Cause:** Too many concurrent viewers +- **Solution:** Limit viewers, use sub-streams for viewing +- **Cause:** Slow database storage +- **Solution:** Move database to SSD + +## 9. Monitoring and Troubleshooting + +**Priority: Medium** + +Effective monitoring helps you identify performance issues before they impact your system's reliability. + +### Frigate UI Metrics + +The Frigate web interface provides real-time performance metrics: + +**System Stats (Debug page):** + +- **CPU Usage:** Overall system CPU percentage +- **Detector Inference Speed:** Milliseconds per detection +- **Detection FPS:** Actual detections per second being processed +- **Process FPS:** Frames being processed per second +- **Skipped FPS:** Frames skipped due to system overload + +**Camera-specific stats:** + +- **Camera FPS:** Actual frame rate from camera +- **Detection FPS:** Rate at which this camera's frames are being analyzed +- **Process FPS:** Rate at which frames are being decoded +- **Skipped FPS:** Frames dropped due to processing delays + +**What to monitor:** + +- **Detection FPS approaching detector capacity:** Time to add detector +- **Skipped FPS > 0:** System falling behind, needs optimization +- **Process FPS < Camera FPS:** Decoding bottleneck, enable hwaccel +- **Inference speed increasing:** Detector struggling, may need upgrade + +### Log Analysis + +Frigate logs provide detailed information about system behavior and errors: + 
+**Key log messages to watch for:** + +**"FFmpeg process crashed unexpectedly"** + +- **Cause:** Camera stream issues, network problems, or invalid FFmpeg args +- **Solution:** Check camera accessibility, verify FFmpeg configuration +- **Debug:** Enable FFmpeg logging to see detailed error + +**"Detection appears to be stuck"** + +- **Cause:** Detector process hung or overloaded +- **Solution:** Restart Frigate, check detector hardware, add second detector +- **Prevention:** Monitor detector usage, don't exceed capacity + +**"Unable to read frames from camera"** + +- **Cause:** Network issues, camera offline, or authentication failure +- **Solution:** Verify camera network connection, check credentials +- **Note:** Normal during camera reboots or brief outages + +**"Insufficient shared memory"** + +- **Cause:** shm_size too small for configured cameras +- **Solution:** Increase shm_size in Docker configuration (Section 7) +- **Critical:** Will cause crashes if not addressed + +**"Skipping frame, detection queue is full"** + +- **Cause:** Detector can't keep up with detection requests +- **Solution:** Add second detector, reduce detect FPS, add motion masks +- **Impact:** Missing potential detections during high activity + +### Setting Log Levels + +Adjust log verbosity for troubleshooting: + +```yaml +logger: + default: info + logs: + frigate.event: debug + frigate.record: debug +``` + +**Log levels:** + +- **error:** Only critical errors (minimal logging) +- **warning:** Errors and warnings (recommended for production) +- **info:** General information (default, good balance) +- **debug:** Detailed debugging information (troubleshooting only) + +**Component-specific logging:** + +- `frigate.event`: Object detection and tracking events +- `frigate.record`: Recording and retention operations +- `frigate.mqtt`: MQTT communication +- `frigate.object_processing`: Object detection processing +- `frigate.motion`: Motion detection + +**Best practices:** + +- Use `info` for 
default level in production +- Enable `debug` only for specific components when troubleshooting +- Excessive debug logging can impact performance +- Review logs regularly for warnings and errors + +### Debug View + +The debug view in Frigate UI is essential for optimization: + +**Accessing debug view:** + +1. Navigate to camera in Frigate UI +2. Click "Debug" from the Settings cog +3. Enable visualization options + +**Debug overlays:** + +**Motion boxes (red):** + +- Shows areas where motion was detected +- Helps identify areas to mask +- Reveals motion detection sensitivity issues + +**Regions (green):** + +- Shows areas sent to detector for object detection +- Should correspond to motion boxes +- Large regions indicate inefficient detection + +**Objects (blue):** + +- Shows detected objects with labels and confidence scores +- Helps tune min_score and threshold values +- Reveals false positives and missed detections + +**Zones (purple):** + +- Shows defined zones if configured +- Helps verify zone coverage +- Useful for zone-based filtering + +**Using debug view for optimization:** + +1. Enable motion boxes to identify constant motion areas +2. Add motion masks for these areas +3. Enable objects to see detection confidence scores +4. Adjust threshold if too many low-confidence detections +5. Verify regions aren't excessively large + +## 10. Hardware Upgrade Path + +**Priority: Reference** + +When software optimization isn't enough, hardware upgrades provide the next performance tier. + +### Upgrade Priority Order + +**1. Add a dedicated detector (Highest impact)** + +If using a CPU detector, adding any dedicated detector provides massive improvement: + +**Hailo-8L M.2:** + +- Similar performance to Coral +- M.2 form factor for cleaner installation +- Good alternative if Coral unavailable + +**Intel Arc A310/A380:** + +- Excellent for OpenVINO +- Also provides hardware decode acceleration +- Supports 10-20 cameras +- Dual benefit: detection + hwaccel + +**2. 
Enable hardware decode acceleration (High impact)** + +If using CPU for video decoding: + +**Intel CPU with QuickSync (6th gen+):** + +- Already have it if using Intel CPU +- Just enable VA-API in configuration +- Reduces CPU usage 50-80% +- Supports many simultaneous streams + +**Add discrete GPU for decode:** + +- Intel Arc A310: excellent decode + OpenVINO +- NVIDIA GTX 1650: good decode + TensorRT option +- Dedicated decode hardware frees CPU + +**3. Add second detector instance (Medium-high impact)** + +When single detector at capacity: + +**Requirements:** + +- Sufficient GPU VRAM (for GPU detectors) +- Or second physical detector (second `openvino` or `onnx` instance) +- Minimal configuration change, but not supported by all detector types + +**Benefit:** + +- Doubles detection capacity +- Handles twice as many cameras +- Reduces detection latency during peaks + +**4. Upgrade CPU (Medium impact)** + +If decode is bottleneck even with hwaccel: + +**Intel 12th gen+ with better QuickSync:** + +- Improved decode efficiency +- More streams per CPU +- Better integrated GPU performance + +**Higher core count:** + +- More parallel decode streams +- Better for many cameras +- Diminishing returns beyond 8 cores for Frigate + +**5. Upgrade storage (Low-medium impact)** + +If experiencing recording issues: + +**NVMe SSD:** + +- Fastest I/O for database and recordings +- Reduces latency for clip generation +- Essential for 10+ cameras + +**Dedicated recording drive:** + +- Separate OS/database from recordings +- Prevents recording I/O from impacting system +- Can use slower/cheaper storage for recordings + +**6. 
Increase RAM (Low impact)** + +Usually not the bottleneck, but needed if: + +- Running many other services +- Using very large tmpfs cache +- 20+ cameras with high resolution + +**Recommendations:** + +- 4 GB minimum for Frigate +- 8 GB comfortable for most installations +- 16 GB for large installations or shared server + +### Hardware Recommendations by Scale + +**Small Installation (1-3 cameras):** + +- **Minimum:** Raspberry Pi 4 (4GB) + Coral +- **Better:** Intel N100 mini PC (built-in QuickSync + Coral) +- **Best:** Intel N100 + Coral + +**Medium Installation (4-8 cameras):** + +- **Minimum:** Intel 8th gen+ CPU +- **Better:** Intel 12th gen+ with integrated GPU + Hailo or MemryX +- **Best:** Intel with Arc A310 (decode + OpenVINO or Hailo/MemryX) + + **Large Installation (9-15 cameras):** + +- **Minimum:** Intel 10th gen+ + OpenVINO or Hailo or MemryX +- **Better:** Intel 12th gen+ + Hailo-8 or MemryX +- **Best:** Intel with Arc A380 (decode + OpenVINO) or NVIDIA RTX 3060 (decode + ONNX) + +**Very Large Installation (16+ cameras):** + +- **Minimum:** Intel 12th gen+ + 2× Hailo-8L +- **Better:** Dedicated server with NVIDIA RTX 3060/4060 + TensorRT +- **Best:** Server-grade Intel with Arc A770 or NVIDIA RTX 4070 + +### When to Upgrade vs. Optimize + +**Optimize first if:** + +- CPU usage <80% average +- Detector usage <85% +- No skipped frames +- Sufficient RAM available +- Storage not full + +**Consider hardware upgrade if:** + +- CPU consistently >90% even with hwaccel enabled +- Detector at 100% with optimized config +- Skipped frames even with reduced FPS +- Out of memory errors despite proper shm_size +- Cannot add more cameras without degradation + +**Optimization checklist before upgrading:** + +1. ✓ Hardware acceleration enabled? See the [hardware acceleration documentation](../configuration/hardware_acceleration_video). +2. ✓ Detect resolution ≤720p? See the [getting started guide](../guides/getting_started). +3. ✓ Detect FPS ≤5? +4. 
✓ Motion masks configured?
+5. ✓ Separate detect/record streams?
+6. ✓ Object filters tuned? See the [configuration reference](../configuration/reference).
+7. ✓ Using efficient retention mode?
+8. ✓ Tmpfs cache configured? See the [getting started guide](../guides/getting_started).
+
+If all optimizations applied and still insufficient, hardware upgrade is justified.
+
+## 11. Common Performance Issues and Solutions
+
+### Issue: High CPU Usage Despite Hardware Acceleration
+
+**Symptoms:**
+
+- CPU at 80-100% even with hwaccel enabled
+- FFmpeg processes consuming excessive CPU
+- System becoming unresponsive
+
+**Diagnostic steps:**
+
+1. Verify hwaccel actually working (check logs for errors) in the [hardware acceleration documentation](../configuration/hardware_acceleration_video).
+2. Confirm GPU device accessible in container
+3. Check if using correct preset for your hardware
+4. Verify camera streams are compatible codec (H.264/H.265)
+
+**Solutions:**
+
+- Ensure Docker container has GPU device access
+- Try different hwaccel preset if auto-detection fails
+- Check camera codec compatibility
+- Reduce number of simultaneous streams
+- Lower detect resolution further
+
+### Issue: Detector CPU Usage at 100%
+
+**Symptoms:**
+
+- Detection FPS below configured FPS
+- "Skipped detections" noted in Camera Metrics
+- Delayed object detection in UI
+- Objects appearing/disappearing erratically
+
+**Diagnostic steps:**
+
+1. Check Detector CPU usage in Frigate UI System Metrics
+2. Review inference speed (should be fairly consistent)
+3. Count total detect FPS across all cameras
+4. Calculate if exceeding detector capacity
+
+**Solutions:**
+
+- Add second detector instance as described in the [detector documentation](../configuration/object_detectors).
+- Reduce detect FPS from 5 to 3 in the [getting started guide](../guides/getting_started). 
+- Add motion masks to reduce detection triggers +- Increase object filter thresholds in the [configuration reference](../configuration/reference). +- Consider upgrading detector hardware as described in the [detector documentation](../configuration/object_detectors). + +### Issue: Recording Gaps or Missing Footage + +**Symptoms:** + +- Gaps in timeline +- "FFmpeg process crashed" errors +- Intermittent camera connectivity + +**Diagnostic steps:** + +1. Check FFmpeg logs for specific errors +2. Verify network stability to cameras +3. Check storage space and I/O performance +4. Review retry_interval setting in the [configuration reference](../configuration/reference). + +**Solutions:** + +- Increase retry_interval for wireless cameras +- Verify network infrastructure (switches, WiFi) +- Check camera firmware for known issues +- Ensure sufficient storage and fast enough disk +- Consider wired connection for critical cameras + +### Issue: Out of Memory or Bus Errors + +**Symptoms:** + +- Frigate crashes with bus error +- "Failed to create shared memory" errors +- Container restarts frequently +- System becomes unresponsive + +**Diagnostic steps:** + +1. Check configured shm_size in the [getting started guide](../guides/getting_started). +2. Calculate actual requirements based on cameras +3. Review system memory usage +4. Check for memory leaks (increasing over time) + +**Solutions:** + +- Increase shm_size in Docker configuration +- Add tmpfs volume for cache +- Reduce number of cameras or resolution +- Ensure sufficient system RAM +- Restart Frigate to clear any memory leaks + +### Issue: Slow UI or High Latency + +**Symptoms:** + +- UI takes long to load +- Live view stuttering or delayed +- Clip playback buffering +- Timeline loading slowly + +**Diagnostic steps:** + +1. Check number of concurrent viewers +2. Review network bandwidth usage +3. Check database size and location +4. 
Verify go2rtc performance + +**Solutions:** + +- Limit concurrent viewers +- Use sub-streams for live viewing +- Move database to SSD +- Optimize go2rtc configuration +- Reduce recording retention to shrink database +- Use WebRTC instead of MSE for lower latency + +## Conclusion + +Optimizing Frigate performance requires a systematic approach across multiple areas. The highest-impact optimizations are: + +1. **Enable hardware acceleration for video decoding** in the [hardware acceleration documentation](../configuration/hardware_acceleration_video) - Reduces CPU usage by 50-80% +2. **Use a dedicated detector** in the [detector documentation](../configuration/object_detectors) - 10-20× faster than CPU detection +3. **Configure appropriate detect resolution and FPS** in the [getting started guide](../guides/getting_started) - Balance accuracy with resources +4. **Implement motion masks**- Reduce unnecessary detection cycles +5. **Use separate detect and record streams**- Optimize each stream for its purpose + +Start with these foundational optimizations, then fine-tune based on your specific hardware and requirements. Monitor system metrics regularly to identify bottlenecks and validate improvements. When software optimization reaches its limits, strategic hardware upgrades provide the next performance tier. + +Remember that every installation is unique - what works optimally for one setup may need adjustment for another. Use the debug view, logs, and system metrics to guide your optimization decisions rather than blindly copying configurations. 
diff --git a/docs/sidebars.ts b/docs/sidebars.ts index 4c8effeec..6d4c0c981 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -12,6 +12,7 @@ const sidebars: SidebarsConfig = { "frigate/updating", "frigate/camera_setup", "frigate/video_pipeline", + "frigate/optimizing_performance", "frigate/glossary", ], Guides: [ From 3c45fe58012dd9657bcc1a84a262736ef909d2ca Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 13:38:16 -0600 Subject: [PATCH 05/17] tweaks --- docs/docs/frigate/optimizing_performance.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md index 04a89900d..9748ee71c 100644 --- a/docs/docs/frigate/optimizing_performance.md +++ b/docs/docs/frigate/optimizing_performance.md @@ -1254,7 +1254,6 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi - Move database to SSD - Optimize go2rtc configuration - Reduce recording retention to shrink database -- Use WebRTC instead of MSE for lower latency ## Conclusion From 3e9b3fcc7d8760ddfb937a3a473f8d6d70eaacb6 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 13:48:03 -0600 Subject: [PATCH 06/17] fix character issue --- docs/docs/frigate/optimizing_performance.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md index 9748ee71c..9b60d29c4 100644 --- a/docs/docs/frigate/optimizing_performance.md +++ b/docs/docs/frigate/optimizing_performance.md @@ -774,7 +774,7 @@ Network bandwidth impacts both camera-to-Frigate and Frigate-to-client connectio **MQTT traffic:** -- Minimal, typically <1 Mbps even with many cameras +- Minimal, typically less than 1 Mbps even with many cameras - Event notifications and state updates **API traffic:** From 
39eb913b352742680f8cc156e60d73cb690b5145 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 13:50:32 -0600 Subject: [PATCH 07/17] fix more characters --- docs/docs/frigate/optimizing_performance.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md index 9b60d29c4..71c6041fe 100644 --- a/docs/docs/frigate/optimizing_performance.md +++ b/docs/docs/frigate/optimizing_performance.md @@ -1108,8 +1108,8 @@ Usually not the bottleneck, but needed if: **Optimize first if:** -- CPU usage <80% average -- Detector usage <85% +- CPU usage less than 80% average +- Detector usage less than 85% - No skipped frames - Sufficient RAM available - Storage not full From 5c91523d111ab0783e63e726776b556c532701d5 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 13:58:05 -0600 Subject: [PATCH 08/17] fix links --- docs/docs/frigate/optimizing_performance.md | 62 ++++++++++----------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md index 71c6041fe..cc75acf69 100644 --- a/docs/docs/frigate/optimizing_performance.md +++ b/docs/docs/frigate/optimizing_performance.md @@ -11,7 +11,7 @@ Optimizing Frigate's performance is key to ensuring a responsive system and mini **Priority: Critical** -Video decompression is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams, as described in the [getting started guide](../guides/getting_started). Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware. 
+Video decompression is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams, as described in the [getting started guide](../guides/getting_started.md). Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware. ### Key Concepts @@ -26,7 +26,7 @@ Video decompression is one of the most CPU-intensive tasks in Frigate. While an ### Configuration -Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference) as described in the [getting started guide](../guides/getting_started). +Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference.md) as described in the [getting started guide](../guides/getting_started.md). ### Troubleshooting Hardware Acceleration @@ -42,17 +42,17 @@ If hardware acceleration isn't working: **Priority: Critical** -Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [detector documentation](../configuration/object_detectors). +Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [detector documentation](../configuration/object_detectors.md). 
### Understanding Detector Performance -Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle. +Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection.md). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle. **Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100).If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind. ### Choosing the Right Detector -Different detectors have vastly different performance characteristics, as shown in the [detector documentation](../configuration/object_detectors): +Different detectors have vastly different performance characteristics, as shown in the [detector documentation](../configuration/object_detectors.md): **OpenVINO on Intel:** @@ -107,7 +107,7 @@ detectors: ### Model Selection and Optimization -The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors). +The model you use significantly impacts detector performance. 
Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors.md). **Model Size Trade-offs:** @@ -118,24 +118,24 @@ The model you use significantly impacts detector performance. Frigate provides d **Priority: High** -Properly configuring your camera streams is essential for optimal performance. Frigate supports multiple input streams per camera, allowing you to use different resolutions and frame rates for different purposes, as explained in the [camera documentation](../configuration/cameras). +Properly configuring your camera streams is essential for optimal performance. Frigate supports multiple input streams per camera, allowing you to use different resolutions and frame rates for different purposes, as explained in the [camera documentation](../configuration/cameras.md). ### Detect Stream Resolution -Your `detect` stream resolution should be just high enough to reliably identify the objects you care about, but no higher. Higher resolutions dramatically increase both CPU (decoding) and detector (inference) workload, as noted in the [getting started guide](../guides/getting_started) and [camera documentation](../configuration/cameras). +Your `detect` stream resolution should be just high enough to reliably identify the objects you care about, but no higher. Higher resolutions dramatically increase both CPU (decoding) and detector (inference) workload, as noted in the [getting started guide](../guides/getting_started.md) and [camera documentation](../configuration/cameras.md). **Resolution Guidelines:** -- **1280x720 (720p):** Recommended starting point for most installations in the [getting started guide](../guides/getting_started). +- **1280x720 (720p):** Recommended starting point for most installations in the [getting started guide](../guides/getting_started.md). 
- **640x480 (VGA):** Suitable for close-range detection (doorways, small rooms) - **1920x1080 (1080p):** Only if detecting small objects at distance - **Avoid 4K for detection:** Rarely necessary and extremely resource-intensive -**Important:** Frigate will try to automatically detect the stream resolution if not specified, but it is recommended to explicitly set it in the [getting started guide](../guides/getting_started) to ensure consistency and help with troubleshooting. +**Important:** Frigate will try to automatically detect the stream resolution if not specified, but it is recommended to explicitly set it in the [getting started guide](../guides/getting_started.md) to ensure consistency and help with troubleshooting. ### Frame Rate Optimization -A higher frame rate for detection increases CPU and detector load without always improving accuracy. Most motion happens over multiple frames, so 5 FPS is typically sufficient for reliable object detection, as described in the [configuration reference](../configuration/reference) and [motion detection documentation](../configuration/motion_detection). +A higher frame rate for detection increases CPU and detector load without always improving accuracy. Most motion happens over multiple frames, so 5 FPS is typically sufficient for reliable object detection, as described in the [configuration reference](../configuration/reference.md) and [motion detection documentation](../configuration/motion_detection.md). **Recommended Configuration:** @@ -146,17 +146,17 @@ detect: **Frame Rate Guidelines:** -- **5 FPS:** Ideal for most use cases, recommended default in the [getting started guide](../guides/getting_started). +- **5 FPS:** Ideal for most use cases, recommended default in the [getting started guide](../guides/getting_started.md). - **3-4 FPS:** Acceptable for stationary camera monitoring slow-moving objects - **10 FPS:** Rarely beneficial, significantly increases resource usage. 
Only recommended for users with dedicated LPR cameras where the plate crosses the full frame very quickly. - **Over 10 FPS**: Significantly increases resource usage for no benefit. - **Reduce at camera level:** If possible, configure your camera to output 5 FPS directly in firmware to save bandwidth and decoding cycles -**Why 5 FPS works:** Frigate uses motion detection to decide when to run object detection. At 5 FPS, there's only 200ms between frames, which is fast enough to catch any significant motion while using 1/6th the resources of 30 FPS, as explained in the [motion detection documentation](../configuration/motion_detection). +**Why 5 FPS works:** Frigate uses motion detection to decide when to run object detection. At 5 FPS, there's only 200ms between frames, which is fast enough to catch any significant motion while using 1/6th the resources of 30 FPS, as explained in the [motion detection documentation](../configuration/motion_detection.md). ### Separate Streams for Detect vs. Record -One of the most impactful optimizations is using separate streams for detection and recording. This allows you to use a low-resolution sub-stream for efficient detection while recording high-quality footage from the main stream, as recommended in the [getting started guide](../guides/getting_started) and [camera documentation](../configuration/cameras). +One of the most impactful optimizations is using separate streams for detection and recording. This allows you to use a low-resolution sub-stream for efficient detection while recording high-quality footage from the main stream, as recommended in the [getting started guide](../guides/getting_started.md) and [camera documentation](../configuration/cameras.md). 
**Benefits of separate streams:** @@ -166,7 +166,7 @@ One of the most impactful optimizations is using separate streams for detection - Maintains high-quality recordings for evidence - Allows independent optimization of each stream -**Single Stream Configuration:** If you must use a single stream, Frigate will automatically assign the `detect` role, as described in the [camera documentation](../configuration/cameras). +**Single Stream Configuration:** If you must use a single stream, Frigate will automatically assign the `detect` role, as described in the [camera documentation](../configuration/cameras.md). However, this is less efficient as the same high-resolution stream must be decoded for both purposes. @@ -174,7 +174,7 @@ However, this is less efficient as the same high-resolution stream must be decod **Priority: High** -Motion detection acts as the critical first-line gatekeeper for object detection in Frigate. It determines when and where to run expensive AI inference, as explained in the [motion detection documentation](../configuration/motion_detection). Properly tuned motion detection ensures your detector only analyzes relevant areas, dramatically improving system efficiency. +Motion detection acts as the critical first-line gatekeeper for object detection in Frigate. It determines when and where to run expensive AI inference, as explained in the [motion detection documentation](../configuration/motion_detection.md). Properly tuned motion detection ensures your detector only analyzes relevant areas, dramatically improving system efficiency. ### Understanding Motion Detection's Role @@ -189,7 +189,7 @@ Frigate uses motion detection to identify areas of the frame worth checking with ### Motion Masks -Motion masks prevent specific areas from triggering object detection. 
This is one of the most impactful optimizations you can make, as described in the [mask documentation](../configuration/masks) and [motion detection documentation](../configuration/motion_detection). +Motion masks prevent specific areas from triggering object detection. This is one of the most impactful optimizations you can make, as described in the [mask documentation](../configuration/masks.md) and [motion detection documentation](../configuration/motion_detection.md). **What to mask:** @@ -204,7 +204,7 @@ Motion masks prevent specific areas from triggering object detection. This is on - Areas where you want to detect objects, even if you don't want alerts there - Paths objects might take to reach areas of interest -- Transition zones between monitored areas, as noted in the [mask documentation](../configuration/masks). +- Transition zones between monitored areas, as noted in the [mask documentation](../configuration/masks.md). **Critical distinction - Motion masks vs. Object filter masks:** Motion masks only prevent motion from triggering detection. They do NOT prevent objects from being detected if object detection was started due to motion in unmasked areas. If you want to filter out false positive detections in specific locations, use object filter masks instead (covered in Section 5). @@ -214,7 +214,7 @@ Motion masks prevent specific areas from triggering object detection. This is on Frigate provides several parameters to fine-tune motion detection sensitivity. These work together to determine what constitutes "motion" worthy of triggering object detection. -**threshold:** The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion, as described in the [configuration reference](../configuration/reference). +**threshold:** The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion, as described in the [configuration reference](../configuration/reference.md). 
```yaml motion: @@ -403,9 +403,9 @@ objects: ### Object Filter Masks -Object filter masks are different from motion masks. They filter out false positives for specific object types based on location, evaluated at the bottom center of the detected object's bounding box, as described in the [mask documentation](../configuration/masks). +Object filter masks are different from motion masks. They filter out false positives for specific object types based on location, evaluated at the bottom center of the detected object's bounding box, as described in the [mask documentation](../configuration/masks.md). -See the [mask documentation](../configuration/masks) and [configuration reference](../configuration/reference) for more details. +See the [mask documentation](../configuration/masks.md) and [configuration reference](../configuration/reference.md) for more details. **Global object mask:** The mask under the `objects` section applies to all tracked object types and is combined with object-specific masks. @@ -416,7 +416,7 @@ See the [mask documentation](../configuration/masks) and [configuration referenc - Remove false positives in fixed locations (like a tree base frequently detected as a person) - Prevent animals from being detected in areas they can't physically access -**How it works:** The bottom center of the bounding box is evaluated against the mask. If it falls in a masked area, the detection is assumed to be a false positive and ignored, as explained in the [mask documentation](../configuration/masks). +**How it works:** The bottom center of the bounding box is evaluated against the mask. If it falls in a masked area, the detection is assumed to be a false positive and ignored, as explained in the [mask documentation](../configuration/masks.md). **Creating object filter masks:** Use the same mask creator tool in the UI (Settings → Mask / zone editor), but select "Object mask" instead of "Motion mask." 
@@ -1124,14 +1124,14 @@ Usually not the bottleneck, but needed if: **Optimization checklist before upgrading:** -1. ✓ Hardware acceleration enabled? See the [hardware acceleration documentation](../configuration/hardware_acceleration_video). -2. ✓ Detect resolution ≤720p? See the [getting started guide](../guides/getting_started). +1. ✓ Hardware acceleration enabled? See the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md). +2. ✓ Detect resolution ≤720p? See the [getting started guide](../guides/getting_started.md). 3. ✓ Detect FPS ≤5? 4. ✓ Motion masks configured? 5. ✓ Separate detect/record streams? -6. ✓ Object filters tuned? See the [configuration reference](../configuration/reference). +6. ✓ Object filters tuned? See the [configuration reference](../configuration/reference.md). 7. ✓ Using efficient retention mode? -8. ✓ Tmpfs cache configured? See the [getting started guide](../guides/getting_started). +8. ✓ Tmpfs cache configured? See the [getting started guide](../guides/getting_started.md). If all optimizations applied and still insufficient, hardware upgrade is justified. @@ -1197,7 +1197,7 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi 1. Check FFmpeg logs for specific errors 2. Verify network stability to cameras 3. Check storage space and I/O performance -4. Review retry_interval setting in the [configuration reference](../configuration/reference). +4. Review retry_interval setting in the [configuration reference](../configuration/reference.md). **Solutions:** @@ -1218,7 +1218,7 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi **Diagnostic steps:** -1. Check configured shm_size in the [getting started guide](../guides/getting_started). +1. Check configured shm_size in the [getting started guide](../guides/getting_started.md). 2. Calculate actual requirements based on cameras 3. Review system memory usage 4. 
Check for memory leaks (increasing over time) @@ -1259,9 +1259,9 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi Optimizing Frigate performance requires a systematic approach across multiple areas. The highest-impact optimizations are: -1. **Enable hardware acceleration for video decoding** in the [hardware acceleration documentation](../configuration/hardware_acceleration_video) - Reduces CPU usage by 50-80% -2. **Use a dedicated detector** in the [detector documentation](../configuration/object_detectors) - 10-20× faster than CPU detection -3. **Configure appropriate detect resolution and FPS** in the [getting started guide](../guides/getting_started) - Balance accuracy with resources +1. **Enable hardware acceleration for video decoding** in the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md) - Reduces CPU usage by 50-80% +2. **Use a dedicated detector** in the [detector documentation](../configuration/object_detectors.md) - 10-20× faster than CPU detection +3. **Configure appropriate detect resolution and FPS** in the [getting started guide](../guides/getting_started.md) - Balance accuracy with resources 4. **Implement motion masks** - Reduce unnecessary detection cycles 5. 
**Use separate detect and record streams**- Optimize each stream for its purpose From 21938ebafae646fc74ebe271323c07b25472cd3d Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Thu, 1 Jan 2026 14:01:09 -0600 Subject: [PATCH 09/17] fix more links --- docs/docs/frigate/optimizing_performance.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md index cc75acf69..afb6b348d 100644 --- a/docs/docs/frigate/optimizing_performance.md +++ b/docs/docs/frigate/optimizing_performance.md @@ -1147,7 +1147,7 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi **Diagnostic steps:** -1. Verify hwaccel actually working (check logs for errors) in the [hardware acceleration documentation](../configuration/hardware_acceleration_video). +1. Verify hwaccel actually working (check logs for errors) in the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md). 2. Confirm GPU device accessible in container 3. Check if using correct preset for your hardware 4. Verify camera streams are compatible codec (H.264/H.265) @@ -1178,11 +1178,11 @@ If all optimizations applied and still insufficient, hardware upgrade is justifi **Solutions:** -- Add second detector instance as described in the [detector documentation](../configuration/object_detectors). -- Reduce detect FPS from 5 to 3 in the [getting started guide](../guides/getting_started). +- Add second detector instance as described in the [detector documentation](../configuration/object_detectors.md). +- Reduce detect FPS from 5 to 3 in the [getting started guide](../guides/getting_started.md). - Add motion masks to reduce detection triggers -- Increase object filter thresholds in the [configuration reference](../configuration/reference). 
-- Consider upgrading detector hardware as described in the [detector documentation](../configuration/object_detectors). +- Increase object filter thresholds in the [configuration reference](../configuration/reference.md). +- Consider upgrading detector hardware as described in the [detector documentation](../configuration/object_detectors.md). ### Issue: Recording Gaps or Missing Footage From 613b6af1be348a0212664f24a93ecf24f09542d0 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 13:31:03 -0700 Subject: [PATCH 10/17] Refactor new docs --- docs/docs/frigate/optimizing_performance.md | 1270 ------------------- docs/docs/troubleshooting/cpu.md | 73 ++ docs/docs/troubleshooting/memory.md | 2 +- docs/sidebars.ts | 24 +- 4 files changed, 94 insertions(+), 1275 deletions(-) delete mode 100644 docs/docs/frigate/optimizing_performance.md create mode 100644 docs/docs/troubleshooting/cpu.md diff --git a/docs/docs/frigate/optimizing_performance.md b/docs/docs/frigate/optimizing_performance.md deleted file mode 100644 index afb6b348d..000000000 --- a/docs/docs/frigate/optimizing_performance.md +++ /dev/null @@ -1,1270 +0,0 @@ ---- -id: optimizing_performance -title: Optimizing Performance ---- - -# Optimizing Performance - -Optimizing Frigate's performance is key to ensuring a responsive system and minimizing resource usage. This guide covers the most impactful configuration changes you can make to improve efficiency. - -## 1. Hardware Acceleration for Video Decoding - -**Priority: Critical** - -Video decompression is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams, as described in the [getting started guide](../guides/getting_started.md). Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware. 
- -### Key Concepts - -**Resolution & FPS Impact:** The decoding burden grows exponentially with resolution and frame rate. A 4K stream at 30 FPS requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras. - -**Hardware Acceleration Benefits:** By using dedicated video decode hardware (Intel QuickSync, NVIDIA NVDEC, AMD VCE, or VA-API), you can: - -- Reduce CPU usage by 50-80% per camera stream -- Support 2-3x more cameras on the same hardware -- Free up CPU resources for motion detection and other Frigate processes -- Reduce system heat and power consumption - -### Configuration - -Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference.md) as described in the [getting started guide](../guides/getting_started.md). - -### Troubleshooting Hardware Acceleration - -If hardware acceleration isn't working: - -1. Check Frigate logs for FFmpeg errors related to hwaccel -2. Verify the hardware device is accessible inside the container -3. Ensure your camera streams use H.264 or H.265 codecs (most common) -4. Try different presets if the automatic detection fails -5. Check that your GPU drivers are properly installed on the host system - -## 2. Detector Selection and Configuration - -**Priority: Critical** - -Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [detector documentation](../configuration/object_detectors.md). 
- -### Understanding Detector Performance - -Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection.md). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle. - -**Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100).If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind. - -### Choosing the Right Detector - -Different detectors have vastly different performance characteristics, as shown in the [detector documentation](../configuration/object_detectors.md): - -**OpenVINO on Intel:** - -- Inference speed: 10-35ms depending on iGPU/GPU -- Best for: Systems with Intel CPUs (6th gen+) or Arc GPUs -- Requires: Intel CPU with integrated graphics or discrete Intel GPU - -**Hailo-8/8L:** - -- Inference speed: 7-11ms for yolov6n -- Best for: Newer installations seeking alternatives to Coral -- Requires: M.2 Hailo device - -**NVIDIA TensorRT:** - -- Inference speed: 8-17ms depending on GPU and model -- Best for: Systems with NVIDIA GPUs, especially for many cameras -- Requires: NVIDIA GPU with CUDA support - -**AMD ROCm:** - -- Inference speed: 14-50ms depending on model size -- Best for: Systems with AMD discrete GPUs -- Requires: Supported AMD GPU, use -rocm Frigate image - -**CPU Detector (Not Recommended):** - -- Inference speed: 50-300ms depending on CPU -- Best for: Testing only, not production use -- Note: Even low-end dedicated detectors vastly outperform CPU detection - -### Multiple Detector Instances - -When a single detector 
cannot keep up with your camera count, some detector types (`openvino`, `onnx`) allow you to define multiple detector instances to share the workload. This is particularly useful with GPU-based detectors that have sufficient VRAM to run multiple inference processes: - -```yaml -detectors: - ov_0: - type: openvino - device: GPU - ov_1: - type: openvino - device: GPU -``` - -**When to add a second detector:** - -- Your detection_fps consistently falls below your configured detect fps -- The detector process shows 100% utilization in System Metrics -- You're experiencing delays in object detection appearing in the UI -- You see "detection appears to be stuck" warnings in logs - -### Model Selection and Optimization - -The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors.md). - -**Model Size Trade-offs:** - -- Smaller models (320x320): Faster inference, Frigate is specifically optimized for a 320x320 size model. -- Larger models (640x640): Slower inference, can often times have better accuracy depending on the camera frame and objects you are trying to detect, but also prevents Frigate from zooming in as much on the frame. - -## 3. Camera Stream Configuration - -**Priority: High** - -Properly configuring your camera streams is essential for optimal performance. Frigate supports multiple input streams per camera, allowing you to use different resolutions and frame rates for different purposes, as explained in the [camera documentation](../configuration/cameras.md). - -### Detect Stream Resolution - -Your `detect` stream resolution should be just high enough to reliably identify the objects you care about, but no higher. 
Higher resolutions dramatically increase both CPU (decoding) and detector (inference) workload, as noted in the [getting started guide](../guides/getting_started.md) and [camera documentation](../configuration/cameras.md). - -**Resolution Guidelines:** - -- **1280x720 (720p):** Recommended starting point for most installations in the [getting started guide](../guides/getting_started.md). -- **640x480 (VGA):** Suitable for close-range detection (doorways, small rooms) -- **1920x1080 (1080p):** Only if detecting small objects at distance -- **Avoid 4K for detection:** Rarely necessary and extremely resource-intensive - -**Important:** Frigate will try to automatically detect the stream resolution if not specified, but it is recommended to explicitly set it in the [getting started guide](../guides/getting_started.md) to ensure consistency and help with troubleshooting. - -### Frame Rate Optimization - -A higher frame rate for detection increases CPU and detector load without always improving accuracy. Most motion happens over multiple frames, so 5 FPS is typically sufficient for reliable object detection, as described in the [configuration reference](../configuration/reference.md) and [motion detection documentation](../configuration/motion_detection.md). - -**Recommended Configuration:** - -```yaml -detect: - fps: 5 -``` - -**Frame Rate Guidelines:** - -- **5 FPS:** Ideal for most use cases, recommended default in the [getting started guide](../guides/getting_started.md). -- **3-4 FPS:** Acceptable for stationary camera monitoring slow-moving objects -- **10 FPS:** Rarely beneficial, significantly increases resource usage. Only recommended for users with dedicated LPR cameras where the plate crosses the full frame very quickly. -- **Over 10 FPS**: Significantly increases resource usage for no benefit. 
-- **Reduce at camera level:** If possible, configure your camera to output 5 FPS directly in firmware to save bandwidth and decoding cycles - -**Why 5 FPS works:** Frigate uses motion detection to decide when to run object detection. At 5 FPS, there's only 200ms between frames, which is fast enough to catch any significant motion while using 1/6th the resources of 30 FPS, as explained in the [motion detection documentation](../configuration/motion_detection.md). - -### Separate Streams for Detect vs. Record - -One of the most impactful optimizations is using separate streams for detection and recording. This allows you to use a low-resolution sub-stream for efficient detection while recording high-quality footage from the main stream, as recommended in the [getting started guide](../guides/getting_started.md) and [camera documentation](../configuration/cameras.md). - -**Benefits of separate streams:** - -- Detect stream can be 720p @ 5 FPS for efficiency -- Record stream can be 4K @ 15-30 FPS for quality -- Reduces detector workload by 75% or more -- Maintains high-quality recordings for evidence -- Allows independent optimization of each stream - -**Single Stream Configuration:** If you must use a single stream, Frigate will automatically assign the `detect` role, as described in the [camera documentation](../configuration/cameras.md). - -However, this is less efficient as the same high-resolution stream must be decoded for both purposes. - -## 4. Motion Detection Tuning - -**Priority: High** - -Motion detection acts as the critical first-line gatekeeper for object detection in Frigate. It determines when and where to run expensive AI inference, as explained in the [motion detection documentation](../configuration/motion_detection.md). Properly tuned motion detection ensures your detector only analyzes relevant areas, dramatically improving system efficiency. 
- -### Understanding Motion Detection's Role - -Frigate uses motion detection to identify areas of the frame worth checking with object detection: - -1. **Motion Detection:** Analyzes pixel changes between frames to detect movement -2. **Motion Boxes:** Groups nearby motion into rectangles (red boxes in debug view) -3. **Region Creation:** Creates square regions around motion areas (green boxes) -4. **Object Detection:** Runs AI inference only on these regions - -**Why this matters:** Without motion detection, Frigate would need to run object detection on every frame of every camera continuously, which would be computationally impossible for most systems. Motion detection reduces detector workload by 90-95% in typical scenarios. - -### Motion Masks - -Motion masks prevent specific areas from triggering object detection. This is one of the most impactful optimizations you can make, as described in the [mask documentation](../configuration/masks.md) and [motion detection documentation](../configuration/motion_detection.md). - -**What to mask:** - -- Camera timestamps and on-screen displays -- Trees, bushes, and vegetation that move in wind -- Flags, banners, or other constantly moving objects -- Sky and clouds -- Rooftops and distant background areas -- Reflective surfaces with changing light - -**What NOT to mask:** - -- Areas where you want to detect objects, even if you don't want alerts there -- Paths objects might take to reach areas of interest -- Transition zones between monitored areas, as noted in the [mask documentation](../configuration/masks.md). - -**Critical distinction - Motion masks vs. Object filter masks:** Motion masks only prevent motion from triggering detection. They do NOT prevent objects from being detected if object detection was started due to motion in unmasked areas. If you want to filter out false positive detections in specific locations, use object filter masks instead (covered in Section 5). 
- -**Over-masking warning:** Be cautious about masking too much. Motion is used during object tracking to refine the object detection area in the next frame. Over-masking will make it more difficult for objects to be tracked. If an object walks from an unmasked area into a fully masked area, they essentially disappear and will be picked up as a "new" object if they leave the masked area. - -### Motion Sensitivity Parameters - -Frigate provides several parameters to fine-tune motion detection sensitivity. These work together to determine what constitutes "motion" worthy of triggering object detection. - -**threshold:** The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion, as described in the [configuration reference](../configuration/reference.md). - -```yaml -motion: - threshold: 30 -``` - -- **Range:** 1-255 -- **Default:** 30 -- **Higher values:** Make motion detection less sensitive (fewer false triggers) -- **Lower values:** Make motion detection more sensitive (may trigger on minor changes) -- **When to increase:** Cameras with noisy sensors, busy backgrounds, or frequent lighting changes -- **When to decrease:** Missing slow-moving objects or subtle motion - -**contour_area:** Minimum size in pixels in the resized motion image that counts as motion. 
- -```yaml -motion: - contour_area: 10 -``` - -- **Default:** 10 -- **High sensitivity:** 10 (detects small movements like insects, leaves) -- **Medium sensitivity:** 30 (typical people and vehicle motion) -- **Low sensitivity:** 50 (only larger movements) -- **When to increase:** Reduce false triggers from small movements like insects or debris -- **When to decrease:** Ensure detection of small objects or distant motion - -### Improve Contrast - -The improve_contrast setting enables dynamic contrast improvement to help with challenging lighting conditions: - -```yaml -motion: - improve_contrast: True -``` - -**Benefits:** - -- Helps improve night detections in low-light conditions -- Makes subtle motion more visible to the motion detection algorithm -- Particularly useful for cameras without good low-light performance - -**Trade-offs:** - -- May increase motion sensitivity during daytime -- Can cause more false triggers in high-contrast scenes -- Slightly increases CPU usage for motion processing - -**When to enable:** - -- Cameras struggling with night detection -- Low-quality cameras with poor dynamic range -- Scenes with challenging lighting (backlighting, deep shadows) - -**When to disable:** - -- Already experiencing too many false motion triggers -- High-quality cameras with good low-light performance -- CPU usage is a concern - -### Debugging Motion Detection - -To visualize and tune motion detection: - -1. Navigate to your camera in the Frigate UI -2. Select "Debug" at from the settings cog -3. Enable "Motion boxes" in the options -4. Watch for red boxes showing detected motion areas - -**What to look for:** - -- **Constant motion boxes:** Indicates areas that should be masked -- **No motion boxes when objects move:** Motion detection may be too insensitive -- **Motion boxes everywhere:** Motion detection is too sensitive -- **Boxes not covering moving objects:** May need to adjust contour_area or threshold - -**Iterative tuning process:** - -1. 
Start with default settings -2. Identify problem areas using debug view -3. Add motion masks for constantly triggering areas -4. Adjust sensitivity parameters if needed -5. Test with real-world scenarios -6. Repeat until optimized - -## 5. Object Detection Optimization - -**Priority: Medium-High** - -Once motion detection has identified areas worth analyzing, object detection determines what's actually in those regions. Proper configuration of object detection filters and tracking parameters ensures Frigate only tracks relevant objects and ignores false positives. - -### Object Filters - -Object filters allow you to ignore detections based on size, shape, confidence score, and location. These filters run after the AI model has identified an object, preventing Frigate from wasting resources tracking objects you don't care about. - -**Basic filter configuration:** - -```yaml -objects: - track: - - person - filters: - person: - min_area: 5000 - max_area: 100000 - min_score: 0.5 - threshold: 0.7 -``` - -**min_area:** Minimum size of the bounding box for the detected object. - -- **Default:** 0 (no minimum) -- **Can be specified:** As pixels (width × height) or as a decimal percentage of frame (0.000001 to 0.99) -- **Example:** 5000 pixels filters out very small detections -- **Use case:** Ignore distant objects, insects, or detection artifacts -- **Tuning tip:** Check the bounding box size in the UI for objects you want to track, then set min_area slightly below that - -**max_area:** Maximum size of the bounding box for the detected object. 
- -- **Default:** 24000000 (essentially unlimited) -- **Can be specified:** As pixels or as a decimal percentage of frame -- **Example:** 100000 pixels prevents tracking objects that fill most of the frame -- **Use case:** Filter out false positives from camera adjustments, lens flares, or objects too close to camera -- **Tuning tip:** Objects filling >50% of frame are often false positives or not useful to track - -**min_ratio:** Minimum width/height ratio of the bounding box. - -```yaml -objects: - filters: - person: - min_ratio: 0.5 -``` - -- **Default:** 0 (no minimum) -- **Purpose:** Filter objects based on aspect ratio -- **Example:** 0.5 means width must be at least half the height -- **Use case:** People are typically taller than wide; very wide detections are likely false positives -- **Tuning tip:** Calculate ratio of typical objects (person standing ≈ 0.3-0.6, car ≈ 1.5-3.0) - -**max_ratio:** Maximum width/height ratio of the bounding box. - -```yaml -objects: - filters: - person: - max_ratio: 2.0 -``` - -- **Default:** 24000000 (essentially unlimited) -- **Example:** 2.0 means width can't exceed 2× the height -- **Use case:** Filter out horizontally stretched false positives -- **Tuning tip:** Adjust based on expected object orientations in your scene - -**min_score:** Minimum score for the object to initiate tracking. - -```yaml -objects: - filters: - person: - min_score: 0.5 -``` - -- **Default:** 0.5 (50% confidence) -- **Purpose:** Initial confidence threshold to start tracking an object -- **Range:** 0.0 to 1.0 -- **Higher values:** Fewer false positives, but may miss valid detections -- **Lower values:** More detections captured, but more false positives -- **Tuning tip:** Start at 0.5, increase if too many false positives - -**threshold:** Minimum decimal percentage for tracked object's computed score to be considered a true positive. 
- -```yaml -objects: - filters: - person: - threshold: 0.7 -``` - -- **Default:** 0.7 (70% confidence) -- **Purpose:** Final threshold for an object to be saved/alerted -- **How it works:** Frigate tracks objects over multiple frames and calculates a median score; this must exceed the threshold -- **Difference from min_score:** min_score starts tracking, threshold confirms it's real -- **Minimum frames:** Takes at least 3 frames for Frigate to determine if an object meets the threshold -- **Tuning tip:** This is your primary tool for reducing false positives without missing detections - -### Object Filter Masks - -Object filter masks are different from motion masks. They filter out false positives for specific object types based on location, evaluated at the bottom center of the detected object's bounding box, as described in the [mask documentation](../configuration/masks.md). - -See the [mask documentation](../configuration/masks.md) and [configuration reference](../configuration/reference.md) for more details. - -**Global object mask:** The mask under the `objects` section applies to all tracked object types and is combined with object-specific masks. - -**Object-specific masks:** Masks under specific object types (like `person`) only apply to that object type. - -**Use cases:** - -- Remove false positives in fixed locations (like a tree base frequently detected as a person) -- Prevent animals from being detected in areas they can't physically access - -**How it works:** The bottom center of the bounding box is evaluated against the mask. If it falls in a masked area, the detection is assumed to be a false positive and ignored, as explained in the [mask documentation](../configuration/masks.md). - -**Creating object filter masks:** Use the same mask creator tool in the UI (Settings → Mask / zone editor), but select "Object mask" instead of "Motion mask." - -**Example scenario:** A tree base is frequently detected as a person. 
Use the Frigate UI to create a precise object filter mask over the location where the bottom center of the false detection typically appears. - -## 6. Recording Configuration - -**Priority: Medium** - -Recording configuration impacts both storage requirements and system performance. Properly configured recording ensures you capture the footage you need without wasting disk space or CPU cycles. - -### Retention Modes - -Frigate offers three retention modes that determine which recording segments are kept. Each mode has different storage and CPU implications: - -**all:** Save all recording segments regardless of activity. - -```yaml -record: - retain: - days: 7 - mode: all -``` - -- **Storage impact:** Highest - stores everything continuously -- **CPU impact:** Moderate - still needs to manage and clean up old segments -- **Use case:** Critical areas requiring complete coverage, legal requirements -- **Disk usage:** ~1-3 GB per camera per day (depending on resolution/bitrate) - -**motion:** Save all recording segments with any detected motion. - -```yaml -record: - retain: - days: 7 - mode: motion -``` - -- **Storage impact:** Medium - only stores when motion detected -- **CPU impact:** Low - fewer segments to manage -- **Use case:** Most common configuration, balances coverage with storage -- **Disk usage:** ~200-800 MB per camera per day (depending on activity) -- **Note:** Motion detection must be enabled for this mode to work - -**active_objects:** Save all recording segments with active/moving objects. 
- -```yaml -record: - retain: - days: 7 - mode: active_objects -``` - -- **Storage impact:** Lowest - only stores when tracked objects are moving -- **CPU impact:** Lowest - minimal segments to manage -- **Use case:** Storage-constrained installations, focus on object events only -- **Disk usage:** ~50-300 MB per camera per day (depending on object activity) -- **Note:** Most efficient option but may miss motion-only events - -**Important consideration:** If the retain mode for the camera is more restrictive than modes configured for alerts/detections, the segments will already be gone by the time those modes are applied. For example, if the camera retain mode is "motion", segments without motion are never stored, so setting alerts mode to "all" won't bring them back. - -### Recording Enable/Disable - -Recording must be enabled in the configuration to function: - -```yaml -record: - enabled: False -``` - -- **Default:** False (disabled) -- **Critical warning:** If recording is disabled in the config, turning it on via the UI or MQTT later will have no effect -- **Must be set to True:** To enable recording functionality -- **Can be overridden:** At the camera level for selective recording - -**Camera-level override:** - -```yaml -cameras: - front_door: - record: - enabled: True - retain: - days: 30 -``` - -## 7. System-Level Optimizations - -**Priority: Medium** - -Beyond camera and detection configuration, several system-level settings can significantly impact Frigate's performance and reliability. - -### Shared Memory (shm-size) - -Frigate uses shared memory to pass frames between processes efficiently. Insufficient shared memory will cause Frigate to crash with bus errors. 
- -**Docker Compose configuration:** - -```yaml -services: - frigate: - shm_size: "256mb" -``` - -**General guidelines:** - -- **Minimum:** 128 MB for single camera installations -- **Typical:** 256 MB for 2-5 cameras -- **Large installations:** 512 MB - 1 GB for 10+ cameras or 4K streams -- **Symptoms of insufficient shm:** Bus error crashes, "Failed to create shared memory" errors - -The Frigate UI and logs will warn you if your `shm_size` is too low. - -### Tmpfs for Cache - -Using a tmpfs (RAM disk) for Frigate's cache reduces disk wear and improves performance. - -**Configuration:** - -```yaml -volumes: - - type: tmpfs - target: /tmp/cache - tmpfs: - size: 1000000000 # 1 GB -``` - -**Benefits:** - -- **Faster I/O:** RAM is orders of magnitude faster than disk -- **Reduced disk wear:** Eliminates constant write cycles to storage media -- **Lower latency:** Improves responsiveness for live viewing and clip generation -- **Extended hardware life:** Critical for SD cards and extends SSD lifespan - -**Size recommendations:** - -- **Minimum:** 500 MB for very small installations -- **Typical:** 1-2 GB for most installations -- **Large installations:** 4-8 GB for many cameras or high-resolution streams -- **Calculation:** Roughly 200-500 MB per camera depending on resolution - -**Trade-offs:** - -- **RAM usage:** Reduces available system RAM -- **Volatility:** Cache is lost on restart (not an issue for temporary cache) -- **System requirements:** Ensure sufficient RAM for OS, Frigate, and tmpfs - -## 8. Understanding Resource Usage - -**Priority: Educational** - -Understanding how Frigate uses system resources helps you identify bottlenecks and optimize configuration effectively. 
- -### CPU Usage Breakdown - -Frigate's CPU usage comes from several distinct processes: - -**Video Decoding:** - -- **Primary CPU consumer** in most installations -- Decompresses video streams from cameras -- Scales with: resolution, frame rate, number of cameras, codec complexity -- **Mitigation:** Hardware acceleration (see Section 1) - -**Motion Detection:** - -- Analyzes pixel changes between frames -- Runs on every frame of every camera -- Scales with: detect resolution, frame rate, motion sensitivity settings -- **Mitigation:** Lower detect resolution, reduce FPS, use motion masks - -**Detector CPU**: - -- Prepares model inputs for inference on your object detection hardware -- Runs when motion is detected -- Scales with: frame rate, number of cameras, complexity of the object detection model -- **Mitigation**: Reduce motion sensitivity and detect fps, switch to a less complex model, add a second detector instance (`openvino` or `onnx`) - -**FFmpeg Process Management:** - -- Manages camera connections and stream handling -- Generally low overhead -- Can spike during reconnection attempts -- **Mitigation:** Stable network - -**Database Operations:** - -- Event storage and retrieval -- Generally minimal impact -- Can increase with very high event rates -- **Mitigation:** Proper retention settings, SSD storage - -**Enrichments**: - -- Semantic Search, Face recognition, LPR, custom classification -- Semantic Search generates embeddings for tracked object thumbnails. If run on the CPU, this may significantly drive up CPU usage -- Face recognition, LPR, and custom classification are usually lightweight, but if many faces, plates, or objects are detected, this can slow the pipeline -- **Mitigation**: Run Semantic Search on a GPU and/or disable other enrichments - -**GenAI**: - -- Generative AI for tracked object and review item descriptions -- Local (Ollama) and cloud providers are available. 
Using a local provider without a powerful GPU will significantly increase resource usage -- **Mitigation**: Offload to a cloud provider or use a powerful GPU for inference - -**Web Interface:** - -- Serving live streams and UI -- Increases with number of concurrent viewers -- WebRTC/MSE streaming via go2rtc -- **Mitigation:** Limit concurrent viewers, use sub-streams for viewing - -### Resolution Impact - -Resolution affects both CPU workload exponentially: - -**CPU (Decoding) Impact:** - -- **720p (1280×720):** Baseline reference point -- **1080p (1920×1080):** ~2.25× the pixels, roughly 2× the CPU load -- **4K (3840×2160):** ~9× the pixels of 720p, roughly 6-8× the CPU load -- **Why not linear:** Codec overhead, memory bandwidth, and cache efficiency - -**Example comparison:** - -- Decoding 720p stream: 15% CPU per camera -- Decoding 1080p stream: 30-35% CPU per camera -- Decoding 4K stream: 90-120% CPU per camera (may require multiple cores) - -**Detector (Inference) Impact:** - -The detector must analyze more pixels, but the impact depends on your model: - -- **Fixed input models (320×320, 640×640):** Frigate resizes the frame to match model input, so resolution has minimal direct impact on inference time -- **Indirect impact:** Higher resolution means larger motion regions, potentially more detection runs -- **Region extraction:** Larger source frames require more CPU to crop detection regions - -**Practical implications:** - -- Use lowest resolution that meets detection needs for detect stream -- Save high resolution for record stream only -- 720p is the sweet spot for most detection scenarios -- 1080p only if detecting small/distant objects -- 4K almost never necessary for detection - -### Memory Usage - -Frigate's memory usage comes from several sources: - -**Frame Buffers:** - -- Raw decoded frames stored in shared memory -- Size: resolution × color depth × number of buffered frames -- Example: 1920×1080×3 bytes × 10 frames = ~62 MB per camera - -**Object 
Tracking:** - -- Metadata for each tracked object -- Minimal per object (~1-2 KB) -- Scales with number of simultaneous tracked objects - -**Database Cache:** - -- SQLite database held in memory for performance -- Typically 50-200 MB depending on history - -**Python Process:** - -- Frigate application overhead -- Generally 200-500 MB base usage -- Increases slightly with number of cameras - -**Detector Model:** - -- AI model loaded into detector memory -- OpenVINO/ONNX: 50-200 MB depending on model -- GPU VRAM: 500 MB - 2 GB depending on model and batch size - -**Enrichments:** - -- Semantic Search, LPR, Face Recognition, and custom classification models loaded into memory -- Depending on the enrichment, these models can be quite large. Semantic Search: ~4-6GB, Face Recognition, LPR, custom classification: ~500MB-1GB - -**Total memory recommendations:** - -- **Minimal (1-2 cameras):** 2 GB RAM -- **Small (3-5 cameras):** 4 GB RAM -- **Medium (6-10 cameras):** 8 GB RAM -- **Large (10+ cameras):** 16 GB+ RAM - -### Storage Usage - -Storage requirements vary dramatically based on configuration: - -**Recording storage by mode:** - -**Mode: all (continuous recording)** - -- 1080p @ 15 FPS, 2 Mbps: ~21 GB per camera per day -- 1080p @ 30 FPS, 4 Mbps: ~43 GB per camera per day -- 4K @ 15 FPS, 8 Mbps: ~86 GB per camera per day -- 4K @ 30 FPS, 16 Mbps: ~173 GB per camera per day - -**Mode: motion** - -- Depends heavily on activity level -- Low activity (suburban home): 3–8 GB per camera per day -- Medium activity (urban home): 8–20 GB per camera per day -- High activity (retail, busy street): 20–50 GB per camera per day - -**Mode: active_objects** - -- Most efficient, only records when objects detected -- Low activity: 1–3 GB per camera per day -- Medium activity: 3–8 GB per camera per day -- High activity: 8–15 GB per camera per day - -**Database storage:** - -- Grows with number of events -- Typical: 100–300 MB for months of events -- Cleaned automatically based on 
retention settings - -**Clips and snapshots:** - -- Stored separately from recordings -- Size depends on alert/detection frequency -- Typical: 100–500 MB per camera per month - -**Storage planning example:** - -``` -5 cameras, 1080p recording, motion mode, 14 day retention: -5 cameras × 10 GB/day × 14 days = 700 GB minimum -Add 30% buffer: ~900 GB recommended -``` - -### Network Bandwidth - -Network bandwidth impacts both camera-to-Frigate and Frigate-to-client connections: - -**Camera to Frigate (inbound):** - -- Depends on camera bitrate and number of streams -- 1080p @ 2 Mbps: 2 Mbps per stream -- If using separate detect and record streams: sum both streams -- Example: 5 cameras × (2 Mbps detect + 4 Mbps record) = 30 Mbps total - -**Frigate to Clients (outbound):** - -- Live viewing via WebRTC/MSE through go2rtc -- Each viewer consumes full stream bandwidth -- Example: 3 viewers watching 1080p @ 4 Mbps = 12 Mbps outbound -- Mitigation: Use lower bitrate sub-streams for live viewing - -**MQTT traffic:** - -- Minimal, typically less than 1 Mbps even with many cameras -- Event notifications and state updates - -**API traffic:** - -- Varies with UI usage and integrations -- Generally negligible compared to video streams - -### Identifying Bottlenecks - -**Symptoms and causes:** - -**High CPU usage:** - -- **Cause:** Video decoding without hardware acceleration -- **Solution:** Enable hwaccel_args (Section 1) -- **Cause:** High detect resolution or FPS -- **Solution:** Lower detect resolution/FPS (Section 3) - -**Detector CPU Usage at 100%:** - -- **Cause:** Too many cameras or too much motion -- **Solution:** Add second detector instance (Section 2) -- **Cause:** Model too complex for hardware -- **Solution:** Use smaller/faster model - -**Detection FPS below configured FPS:** - -- **Cause:** System can't keep up with configured rate -- **Solution:** Reduce detect FPS, add hardware acceleration, add detector -- **Cause:** Excessive motion triggering constant 
detection -- **Solution:** Add motion masks (Section 4) - -**Recording gaps or stuttering:** - -- **Cause:** Insufficient disk I/O performance -- **Solution:** Use faster storage (SSD), reduce recording resolution -- **Cause:** Network issues with camera -- **Solution:** Check network stability - -**Out of memory crashes:** - -- **Cause:** Insufficient shared memory -- **Solution:** Increase shm_size (Section 7) -- **Cause:** Too many simultaneous high-resolution streams -- **Solution:** Reduce number of cameras or resolution - -**Slow UI/high latency:** - -- **Cause:** Too many concurrent viewers -- **Solution:** Limit viewers, use sub-streams for viewing -- **Cause:** Slow database storage -- **Solution:** Move database to SSD - -## 9. Monitoring and Troubleshooting - -**Priority: Medium** - -Effective monitoring helps you identify performance issues before they impact your system's reliability. - -### Frigate UI Metrics - -The Frigate web interface provides real-time performance metrics: - -**System Stats (Debug page):** - -- **CPU Usage:** Overall system CPU percentage -- **Detector Inference Speed:** Milliseconds per detection -- **Detection FPS:** Actual detections per second being processed -- **Process FPS:** Frames being processed per second -- **Skipped FPS:** Frames skipped due to system overload - -**Camera-specific stats:** - -- **Camera FPS:** Actual frame rate from camera -- **Detection FPS:** Rate at which this camera's frames are being analyzed -- **Process FPS:** Rate at which frames are being decoded -- **Skipped FPS:** Frames dropped due to processing delays - -**What to monitor:** - -- **Detection FPS approaching detector capacity:** Time to add detector -- **Skipped FPS > 0:** System falling behind, needs optimization -- **Process FPS < Camera FPS:** Decoding bottleneck, enable hwaccel -- **Inference speed increasing:** Detector struggling, may need upgrade - -### Log Analysis - -Frigate logs provide detailed information about system behavior 
and errors: - -**Key log messages to watch for:** - -**"FFmpeg process crashed unexpectedly"** - -- **Cause:** Camera stream issues, network problems, or invalid FFmpeg args -- **Solution:** Check camera accessibility, verify FFmpeg configuration -- **Debug:** Enable FFmpeg logging to see detailed error - -**"Detection appears to be stuck"** - -- **Cause:** Detector process hung or overloaded -- **Solution:** Restart Frigate, check detector hardware, add second detector -- **Prevention:** Monitor detector usage, don't exceed capacity - -**"Unable to read frames from camera"** - -- **Cause:** Network issues, camera offline, or authentication failure -- **Solution:** Verify camera network connection, check credentials -- **Note:** Normal during camera reboots or brief outages - -**"Insufficient shared memory"** - -- **Cause:** shm_size too small for configured cameras -- **Solution:** Increase shm_size in Docker configuration (Section 7) -- **Critical:** Will cause crashes if not addressed - -**"Skipping frame, detection queue is full"** - -- **Cause:** Detector can't keep up with detection requests -- **Solution:** Add second detector, reduce detect FPS, add motion masks -- **Impact:** Missing potential detections during high activity - -### Setting Log Levels - -Adjust log verbosity for troubleshooting: - -```yaml -logger: - default: info - logs: - frigate.event: debug - frigate.record: debug -``` - -**Log levels:** - -- **error:** Only critical errors (minimal logging) -- **warning:** Errors and warnings (recommended for production) -- **info:** General information (default, good balance) -- **debug:** Detailed debugging information (troubleshooting only) - -**Component-specific logging:** - -- `frigate.event`: Object detection and tracking events -- `frigate.record`: Recording and retention operations -- `frigate.mqtt`: MQTT communication -- `frigate.object_processing`: Object detection processing -- `frigate.motion`: Motion detection - -**Best practices:** - -- 
Use `info` for default level in production -- Enable `debug` only for specific components when troubleshooting -- Excessive debug logging can impact performance -- Review logs regularly for warnings and errors - -### Debug View - -The debug view in Frigate UI is essential for optimization: - -**Accessing debug view:** - -1. Navigate to camera in Frigate UI -2. Click "Debug" from the Settings cog -3. Enable visualization options - -**Debug overlays:** - -**Motion boxes (red):** - -- Shows areas where motion was detected -- Helps identify areas to mask -- Reveals motion detection sensitivity issues - -**Regions (green):** - -- Shows areas sent to detector for object detection -- Should correspond to motion boxes -- Large regions indicate inefficient detection - -**Objects (blue):** - -- Shows detected objects with labels and confidence scores -- Helps tune min_score and threshold values -- Reveals false positives and missed detections - -**Zones (purple):** - -- Shows defined zones if configured -- Helps verify zone coverage -- Useful for zone-based filtering - -**Using debug view for optimization:** - -1. Enable motion boxes to identify constant motion areas -2. Add motion masks for these areas -3. Enable objects to see detection confidence scores -4. Adjust threshold if too many low-confidence detections -5. Verify regions aren't excessively large - -## 10. Hardware Upgrade Path - -**Priority: Reference** - -When software optimization isn't enough, hardware upgrades provide the next performance tier. - -### Upgrade Priority Order - -**1. 
Add a dedicated detector (Highest impact)** - -If using a CPU detector, adding any dedicated detector provides massive improvement: - -**Hailo-8L M.2:** - -- Similar performance to Coral -- M.2 form factor for cleaner installation -- Good alternative if Coral unavailable - -**Intel Arc A310/A380:** - -- Excellent for OpenVINO -- Also provides hardware decode acceleration -- Supports 10-20 cameras -- Dual benefit: detection + hwaccel - -**2. Enable hardware decode acceleration (High impact)** - -If using CPU for video decoding: - -**Intel CPU with QuickSync (6th gen+):** - -- Already have it if using Intel CPU -- Just enable VA-API in configuration -- Reduces CPU usage 50-80% -- Supports many simultaneous streams - -**Add discrete GPU for decode:** - -- Intel Arc A310: excellent decode + OpenVINO -- NVIDIA GTX 1650: good decode + TensorRT option -- Dedicated decode hardware frees CPU - -**3. Add second detector instance (Medium-high impact)** - -When single detector at capacity: - -**Requirements:** - -- Sufficient GPU VRAM (for GPU detectors) -- Or second physical detector (second `openvino` or `onnx` instance) -- Minimal configuration change, but not supported by all detector types - -**Benefit:** - -- Doubles detection capacity -- Handles twice as many cameras -- Reduces detection latency during peaks - -**4. Upgrade CPU (Medium impact)** - -If decode is bottleneck even with hwaccel: - -**Intel 12th gen+ with better QuickSync:** - -- Improved decode efficiency -- More streams per CPU -- Better integrated GPU performance - -**Higher core count:** - -- More parallel decode streams -- Better for many cameras -- Diminishing returns beyond 8 cores for Frigate - -**5. 
Upgrade storage (Low-medium impact)** - -If experiencing recording issues: - -**NVMe SSD:** - -- Fastest I/O for database and recordings -- Reduces latency for clip generation -- Essential for 10+ cameras - -**Dedicated recording drive:** - -- Separate OS/database from recordings -- Prevents recording I/O from impacting system -- Can use slower/cheaper storage for recordings - -**6. Increase RAM (Low impact)** - -Usually not the bottleneck, but needed if: - -- Running many other services -- Using very large tmpfs cache -- 20+ cameras with high resolution - -**Recommendations:** - -- 4 GB minimum for Frigate -- 8 GB comfortable for most installations -- 16 GB for large installations or shared server - -### Hardware Recommendations by Scale - -**Small Installation (1-3 cameras):** - -- **Minimum:** Raspberry Pi 4 (4GB) + Coral -- **Better:** Intel N100 mini PC (built-in QuickSync + Coral) -- **Best:** Intel N100 + Coral - -**Medium Installation (4-8 cameras):** - -- **Minimum:** Intel 8th gen+ CPU -- **Better:** Intel 12th gen+ with integrated GPU + Hailo or MemryX -- **Best:** Intel with Arc A310 (decode + OpenVINO or Hailo/MemryX) - - **Large Installation (9-15 cameras):** - -- **Minimum:** Intel 10th gen+ + OpenVINO or Hailo or MemryX -- **Better:** Intel 12th gen+ + Hailo-8 or MemryX -- **Best:** Intel with Arc A380 (decode + OpenVINO) or NVIDIA RTX 3060 (decode + ONNX) - -**Very Large Installation (16+ cameras):** - -- **Minimum:** Intel 12th gen+ + 2× Hailo-8L -- **Better:** Dedicated server with NVIDIA RTX 3060/4060 + TensorRT -- **Best:** Server-grade Intel with Arc A770 or NVIDIA RTX 4070 - -### When to Upgrade vs. 
Optimize - -**Optimize first if:** - -- CPU usage less than 80% average -- Detector usage less than 85% -- No skipped frames -- Sufficient RAM available -- Storage not full - -**Consider hardware upgrade if:** - -- CPU consistently >90% even with hwaccel enabled -- Detector at 100% with optimized config -- Skipped frames even with reduced FPS -- Out of memory errors despite proper shm_size -- Cannot add more cameras without degradation - -**Optimization checklist before upgrading:** - -1. ✓ Hardware acceleration enabled? See the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md). -2. ✓ Detect resolution ≤720p? See the [getting started guide](../guides/getting_started.md). -3. ✓ Detect FPS ≤5? -4. ✓ Motion masks configured? -5. ✓ Separate detect/record streams? -6. ✓ Object filters tuned? See the [configuration reference](../configuration/reference.md). -7. ✓ Using efficient retention mode? -8. ✓ Tmpfs cache configured? See the [getting started guide](../guides/getting_started.md). - -If all optimizations applied and still insufficient, hardware upgrade is justified. - -## 11. Common Performance Issues and Solutions - -### Issue: High CPU Usage Despite Hardware Acceleration - -**Symptoms:** - -- CPU at 80-100% even with hwaccel enabled -- FFmpeg processes consuming excessive CPU -- System becoming unresponsive - -**Diagnostic steps:** - -1. Verify hwaccel actually working (check logs for errors) in the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md). -2. Confirm GPU device accessible in container -3. Check if using correct preset for your hardware -4. 
Verify camera streams are compatible codec (H.264/H.265) - -**Solutions:** - -- Ensure Docker container has GPU device access -- Try different hwaccel preset if auto-detection fails -- Check camera codec compatibility -- Reduce number of simultaneous streams -- Lower detect resolution further - -### Issue: Detector CPU Usage at 100% - -**Symptoms:** - -- Detection FPS below configured FPS -- "Skippped detections" noted in Camera Metrics -- Delayed object detection in UI -- Objects appearing/disappearing erratically - -**Diagnostic steps:** - -1. Check Detector CPU usage in Frigate UI System Metrics -2. Review inference speed (should be fairly consistent) -3. Count total detect FPS across all cameras -4. Calculate if exceeding detector capacity - -**Solutions:** - -- Add second detector instance as described in the [detector documentation](../configuration/object_detectors.md). -- Reduce detect FPS from 5 to 3 in the [getting started guide](../guides/getting_started.md). -- Add motion masks to reduce detection triggers -- Increase object filter thresholds in the [configuration reference](../configuration/reference.md). -- Consider upgrading detector hardware as described in the [detector documentation](../configuration/object_detectors.md). - -### Issue: Recording Gaps or Missing Footage - -**Symptoms:** - -- Gaps in timeline -- "FFmpeg process crashed" errors -- Intermittent camera connectivity - -**Diagnostic steps:** - -1. Check FFmpeg logs for specific errors -2. Verify network stability to cameras -3. Check storage space and I/O performance -4. Review retry_interval setting in the [configuration reference](../configuration/reference.md). 
- -**Solutions:** - -- Increase retry_interval for wireless cameras -- Verify network infrastructure (switches, WiFi) -- Check camera firmware for known issues -- Ensure sufficient storage and fast enough disk -- Consider wired connection for critical cameras - -### Issue: Out of Memory or Bus Errors - -**Symptoms:** - -- Frigate crashes with bus error -- "Failed to create shared memory" errors -- Container restarts frequently -- System becomes unresponsive - -**Diagnostic steps:** - -1. Check configured shm_size in the [getting started guide](../guides/getting_started.md). -2. Calculate actual requirements based on cameras -3. Review system memory usage -4. Check for memory leaks (increasing over time) - -**Solutions:** - -- Increase shm_size in Docker configuration -- Add tmpfs volume for cache -- Reduce number of cameras or resolution -- Ensure sufficient system RAM -- Restart Frigate to clear any memory leaks - -### Issue: Slow UI or High Latency - -**Symptoms:** - -- UI takes long to load -- Live view stuttering or delayed -- Clip playback buffering -- Timeline loading slowly - -**Diagnostic steps:** - -1. Check number of concurrent viewers -2. Review network bandwidth usage -3. Check database size and location -4. Verify go2rtc performance - -**Solutions:** - -- Limit concurrent viewers -- Use sub-streams for live viewing -- Move database to SSD -- Optimize go2rtc configuration -- Reduce recording retention to shrink database - -## Conclusion - -Optimizing Frigate performance requires a systematic approach across multiple areas. The highest-impact optimizations are: - -1. **Enable hardware acceleration for video decoding** in the [hardware acceleration documentation](../configuration/hardware_acceleration_video.md) - Reduces CPU usage by 50-80% -2. **Use a dedicated detector** in the [detector documentation](../configuration/object_detectors.md) - 10-20× faster than CPU detection -3. 
**Configure appropriate detect resolution and FPS** in the [getting started guide](../guides/getting_started.md) - Balance accuracy with resources -4. **Implement motion masks**- Reduce unnecessary detection cycles -5. **Use separate detect and record streams**- Optimize each stream for its purpose - -Start with these foundational optimizations, then fine-tune based on your specific hardware and requirements. Monitor system metrics regularly to identify bottlenecks and validate improvements. When software optimization reaches its limits, strategic hardware upgrades provide the next performance tier. - -Remember that every installation is unique - what works optimally for one setup may need adjustment for another. Use the debug view, logs, and system metrics to guide your optimization decisions rather than blindly copying configurations. diff --git a/docs/docs/troubleshooting/cpu.md b/docs/docs/troubleshooting/cpu.md new file mode 100644 index 000000000..bfaffe326 --- /dev/null +++ b/docs/docs/troubleshooting/cpu.md @@ -0,0 +1,73 @@ +--- +id: cpu +title: Troubleshooting High CPU Usage +--- + +High CPU usage can impact Frigate's performance and responsiveness. This guide outlines the most effective configuration changes to help reduce CPU consumption and optimize resource usage. + +## 1. Hardware Acceleration for Video Decoding + +**Priority: Critical** + +Video decoding is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams. Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware. + +### Key Concepts + +**Resolution & FPS Impact:** The decoding burden grows exponentially with resolution and frame rate. 
A 4K stream at 30 FPS requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras. + +**Hardware Acceleration Benefits:** By using dedicated video decode hardware (Intel QuickSync, NVIDIA NVDEC, AMD VCE, or VA-API), you can: + +- Significantly reduce CPU usage per camera stream +- Support 2-3x more cameras on the same hardware +- Free up CPU resources for motion detection and other Frigate processes +- Reduce system heat and power consumption + +### Configuration + +Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference) as described in the [getting started guide](../guides/getting_started). + +### Troubleshooting Hardware Acceleration + +If hardware acceleration isn't working: + +1. Check Frigate logs for FFmpeg errors related to hwaccel +2. Verify the hardware device is accessible inside the container +3. Ensure your camera streams use H.264 or H.265 codecs (most common) +4. Try different presets if the automatic detection fails +5. Check that your GPU drivers are properly installed on the host system + +## 2. Detector Selection and Configuration + +**Priority: Critical** + +Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [hardware documentation](../frigate/hardware). 
+ +### Understanding Detector Performance + +Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle. + +**Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100). If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind. + +### Choosing the Right Detector + +Different detectors have vastly different performance characteristics; see the expected performance for object detectors in [the hardware docs](../frigate/hardware). + +### Multiple Detector Instances + +When a single detector cannot keep up with your camera count, some detector types (`openvino`, `onnx`) allow you to define multiple detector instances to share the workload. This is particularly useful with GPU-based detectors that have sufficient VRAM to run multiple inference processes. + +For detailed instructions on configuring multiple detectors, see the [Object Detectors documentation](../configuration/object_detectors). + + +**When to add a second detector:** + +- Skipped FPS is consistently > 0 even during normal activity + +### Model Selection and Optimization + +The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors). 
+ +**Model Size Trade-offs:** + +- Smaller models (320x320): Faster inference, Frigate is specifically optimized for a 320x320 size model. +- Larger models (640x640): Slower inference, can sometimes have higher accuracy on very large objects that take up a majority of the frame. \ No newline at end of file diff --git a/docs/docs/troubleshooting/memory.md b/docs/docs/troubleshooting/memory.md index c74729e5f..22b395469 100644 --- a/docs/docs/troubleshooting/memory.md +++ b/docs/docs/troubleshooting/memory.md @@ -1,6 +1,6 @@ --- id: memory -title: Memory Troubleshooting +title: Troubleshooting Memory Usage --- Frigate includes built-in memory profiling using [memray](https://bloomberg.github.io/memray/) to help diagnose memory issues. This feature allows you to profile specific Frigate modules to identify memory leaks, excessive allocations, or other memory-related problems. diff --git a/docs/sidebars.ts b/docs/sidebars.ts index 6d4c0c981..ea0d2f5c8 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -12,7 +12,6 @@ const sidebars: SidebarsConfig = { "frigate/updating", "frigate/camera_setup", "frigate/video_pipeline", - "frigate/optimizing_performance", "frigate/glossary", ], Guides: [ @@ -130,10 +129,27 @@ const sidebars: SidebarsConfig = { Troubleshooting: [ "troubleshooting/faqs", "troubleshooting/recordings", - "troubleshooting/gpu", - "troubleshooting/edgetpu", - "troubleshooting/memory", "troubleshooting/dummy-camera", + { + type: "category", + label: "Troubleshooting Hardware", + link: { + type: "generated-index", + title: "Troubleshooting Hardware", + description: "Troubleshooting Problems with Hardware", + }, + items: ["troubleshooting/gpu", "troubleshooting/edgetpu"], + }, + { + type: "category", + label: "Troubleshooting Resource Usage", + link: { + type: "generated-index", + title: "Troubleshooting Resource Usage", + description: "Troubleshooting issues with resource usage", + }, + items: ["troubleshooting/cpu", "troubleshooting/memory"], + }, ], 
Development: [ "development/contributing", From c87fc348a7c197a447dd0982516aaaac92558626 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 13:33:53 -0700 Subject: [PATCH 11/17] Add import --- docs/docs/configuration/hardware_acceleration_video.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md index 03ab33100..bbbf5a640 100644 --- a/docs/docs/configuration/hardware_acceleration_video.md +++ b/docs/docs/configuration/hardware_acceleration_video.md @@ -3,6 +3,8 @@ id: hardware_acceleration_video title: Video Decoding --- +import CommunityBadge from '@site/src/components/CommunityBadge'; + # Video Decoding It is highly recommended to use an integrated or discrete GPU for hardware acceleration video decoding in Frigate. From df002222db53512f4411da718999ec180b517aa2 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 13:37:36 -0700 Subject: [PATCH 12/17] Fix link --- docs/docs/frigate/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/frigate/index.md b/docs/docs/frigate/index.md index fa4e8d4f5..83162022c 100644 --- a/docs/docs/frigate/index.md +++ b/docs/docs/frigate/index.md @@ -6,7 +6,7 @@ slug: / A complete and local NVR designed for Home Assistant with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. -Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes. See the [Optimizing Performance Guide](/frigate/optimizing_performance) for tips on getting the most out of your hardware. +Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes. 
- Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration) - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary From f7c754c0980feee97b1651193659790aaf085198 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 13:45:36 -0700 Subject: [PATCH 13/17] Don't list hardware --- docs/docs/troubleshooting/cpu.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/troubleshooting/cpu.md b/docs/docs/troubleshooting/cpu.md index bfaffe326..331f34419 100644 --- a/docs/docs/troubleshooting/cpu.md +++ b/docs/docs/troubleshooting/cpu.md @@ -15,7 +15,7 @@ Video decoding is one of the most CPU-intensive tasks in Frigate. While an AI ac **Resolution & FPS Impact:** The decoding burden grows exponentially with resolution and frame rate. A 4K stream at 30 FPS requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras. 
-**Hardware Acceleration Benefits:** By using dedicated video decode hardware (Intel QuickSync, NVIDIA NVDEC, AMD VCE, or VA-API), you can: +**Hardware Acceleration Benefits:** By using dedicated video decode hardware, you can: - Significantly reduce CPU usage per camera stream - Support 2-3x more cameras on the same hardware From 1cc74480ff3c3fa0ae322f7d0f671da5414ac9fd Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 13:50:31 -0700 Subject: [PATCH 14/17] Reduce redundancy in titles --- docs/docs/troubleshooting/cpu.md | 2 +- docs/docs/troubleshooting/dummy-camera.md | 2 +- docs/docs/troubleshooting/edgetpu.md | 2 +- docs/docs/troubleshooting/gpu.md | 2 +- docs/docs/troubleshooting/memory.md | 2 +- docs/docs/troubleshooting/recordings.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/docs/troubleshooting/cpu.md b/docs/docs/troubleshooting/cpu.md index 331f34419..a9f449ad8 100644 --- a/docs/docs/troubleshooting/cpu.md +++ b/docs/docs/troubleshooting/cpu.md @@ -1,6 +1,6 @@ --- id: cpu -title: Troubleshooting High CPU Usage +title: High CPU Usage --- High CPU usage can impact Frigate's performance and responsiveness. This guide outlines the most effective configuration changes to help reduce CPU consumption and optimize resource usage. diff --git a/docs/docs/troubleshooting/dummy-camera.md b/docs/docs/troubleshooting/dummy-camera.md index 7e7c26ae9..c510f2ba8 100644 --- a/docs/docs/troubleshooting/dummy-camera.md +++ b/docs/docs/troubleshooting/dummy-camera.md @@ -1,6 +1,6 @@ --- id: dummy-camera -title: Troubleshooting Detection +title: Analyzing Object Detection --- When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis. 
diff --git a/docs/docs/troubleshooting/edgetpu.md b/docs/docs/troubleshooting/edgetpu.md index af94a3d84..97b2b0040 100644 --- a/docs/docs/troubleshooting/edgetpu.md +++ b/docs/docs/troubleshooting/edgetpu.md @@ -1,6 +1,6 @@ --- id: edgetpu -title: Troubleshooting EdgeTPU +title: EdgeTPU Errors --- ## USB Coral Not Detected diff --git a/docs/docs/troubleshooting/gpu.md b/docs/docs/troubleshooting/gpu.md index a5b48246a..6399f92d8 100644 --- a/docs/docs/troubleshooting/gpu.md +++ b/docs/docs/troubleshooting/gpu.md @@ -1,6 +1,6 @@ --- id: gpu -title: Troubleshooting GPU +title: GPU Errors --- ## OpenVINO diff --git a/docs/docs/troubleshooting/memory.md b/docs/docs/troubleshooting/memory.md index 22b395469..d062944e5 100644 --- a/docs/docs/troubleshooting/memory.md +++ b/docs/docs/troubleshooting/memory.md @@ -1,6 +1,6 @@ --- id: memory -title: Troubleshooting Memory Usage +title: Memory Usage --- Frigate includes built-in memory profiling using [memray](https://bloomberg.github.io/memray/) to help diagnose memory issues. This feature allows you to profile specific Frigate modules to identify memory leaks, excessive allocations, or other memory-related problems. diff --git a/docs/docs/troubleshooting/recordings.md b/docs/docs/troubleshooting/recordings.md index d26a3614e..b1f180a82 100644 --- a/docs/docs/troubleshooting/recordings.md +++ b/docs/docs/troubleshooting/recordings.md @@ -1,6 +1,6 @@ --- id: recordings -title: Troubleshooting Recordings +title: Recordings Errors --- ## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why? 
From ba3fc116e0caf3b75c4ec585d061fae501cee2d0 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 21:31:54 -0700 Subject: [PATCH 15/17] Add note about Intel NPU and addon --- docs/docs/frigate/installation.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 29ddbd79f..70b4b5bc1 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -465,6 +465,7 @@ There are important limitations in HA OS to be aware of: - Separate local storage for media is not yet supported by Home Assistant - AMD GPUs are not supported because HA OS does not include the mesa driver. +- Intel NPUs are not supported because HA OS does not include the NPU firmware. - Nvidia GPUs are not supported because addons do not support the nvidia runtime. ::: From 8a718421accc5ff060f7bb1cca33224a589da7c3 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 1 Jan 2026 21:44:38 -0700 Subject: [PATCH 16/17] Fix ability to specify if card is using heading --- web/src/components/card/EmptyCard.tsx | 12 +++++++++++- web/src/views/events/EventView.tsx | 3 ++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/web/src/components/card/EmptyCard.tsx b/web/src/components/card/EmptyCard.tsx index 8d6b67a68..00b22d197 100644 --- a/web/src/components/card/EmptyCard.tsx +++ b/web/src/components/card/EmptyCard.tsx @@ -8,6 +8,7 @@ type EmptyCardProps = { className?: string; icon: React.ReactNode; title: string; + titleHeading?: boolean; description?: string; buttonText?: string; link?: string; @@ -16,14 +17,23 @@ export function EmptyCard({ className, icon, title, + titleHeading = true, description, buttonText, link, }: EmptyCardProps) { + let TitleComponent; + + if (titleHeading) { + TitleComponent = {title}; + } else { + TitleComponent =
{title}
; + } + return (
{icon} - {title} + {TitleComponent} {description && (
{description}
)} diff --git a/web/src/views/events/EventView.tsx b/web/src/views/events/EventView.tsx index 9e015dfe4..70067ff5c 100644 --- a/web/src/views/events/EventView.tsx +++ b/web/src/views/events/EventView.tsx @@ -762,8 +762,9 @@ function DetectionReview({ {!loading && currentItems?.length === 0 && ( } /> From 9f508fe990eb68126b92ee7cf6e158145dd55a51 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 2 Jan 2026 08:17:12 -0600 Subject: [PATCH 17/17] improve display of area percentage --- web/src/components/overlay/detail/TrackingDetails.tsx | 8 ++++++-- web/src/components/timeline/DetailStream.tsx | 10 +++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx index 42535d5e1..80471b8bd 100644 --- a/web/src/components/overlay/detail/TrackingDetails.tsx +++ b/web/src/components/overlay/detail/TrackingDetails.tsx @@ -849,7 +849,11 @@ function LifecycleIconRow({ () => Array.isArray(item.data.attribute_box) && item.data.attribute_box.length >= 4 - ? (item.data.attribute_box[2] * item.data.attribute_box[3]).toFixed(4) + ? ( + item.data.attribute_box[2] * + item.data.attribute_box[3] * + 100 + ).toFixed(2) : undefined, [item.data.attribute_box], ); @@ -857,7 +861,7 @@ function LifecycleIconRow({ const areaPct = useMemo( () => Array.isArray(item.data.box) && item.data.box.length >= 4 - ? (item.data.box[2] * item.data.box[3]).toFixed(4) + ? (item.data.box[2] * item.data.box[3] * 100).toFixed(2) : undefined, [item.data.box], ); diff --git a/web/src/components/timeline/DetailStream.tsx b/web/src/components/timeline/DetailStream.tsx index 9258ca457..c6413ed97 100644 --- a/web/src/components/timeline/DetailStream.tsx +++ b/web/src/components/timeline/DetailStream.tsx @@ -744,7 +744,7 @@ function LifecycleItem({ const areaPct = useMemo( () => Array.isArray(item?.data.box) && item?.data.box.length >= 4 - ? 
(item?.data.box[2] * item?.data.box[3]).toFixed(4) + ? (item?.data.box[2] * item?.data.box[3] * 100).toFixed(2) : undefined, [item], ); @@ -766,7 +766,11 @@ function LifecycleItem({ () => Array.isArray(item?.data.attribute_box) && item?.data.attribute_box.length >= 4 - ? (item?.data.attribute_box[2] * item?.data.attribute_box[3]).toFixed(4) + ? ( + item?.data.attribute_box[2] * + item?.data.attribute_box[3] * + 100 + ).toFixed(2) : undefined, [item], ); @@ -845,7 +849,7 @@ function LifecycleItem({ {areaPx !== undefined && areaPct !== undefined ? ( - {areaPx} {t("information.pixels", { ns: "common" })}{" "} + {t("information.pixels", { ns: "common", area: areaPx })}{" "} ·{" "} {areaPct}%