Compare commits

...

2 Commits

Author   SHA1        Message                                                   Date
baudneo  33f3ea3b59  Enrichments: Allow targeting a specific GPU ID (#19342)  2025-08-18 17:43:53 -06:00
scyto    83e9ae616a  Enable Optional IPv6 Support for Nginx (#19602)           2025-08-18 17:39:12 -06:00
17 changed files with 113 additions and 50 deletions


@@ -10,7 +10,7 @@ echo "[INFO] Starting certsync..."

lefile="/etc/letsencrypt/live/frigate/fullchain.pem"

-tls_enabled=`python3 /usr/local/nginx/get_tls_settings.py | jq -r .enabled`
+tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`

while true
do


@@ -85,7 +85,7 @@ python3 /usr/local/nginx/get_base_path.py | \
    -out /usr/local/nginx/conf/base_path.conf

# build templates for optional TLS support
-python3 /usr/local/nginx/get_tls_settings.py | \
+python3 /usr/local/nginx/get_listen_settings.py | \
    tempio -template /usr/local/nginx/templates/listen.gotmpl \
        -out /usr/local/nginx/conf/listen.conf


@@ -26,6 +26,10 @@ try:
except FileNotFoundError:
    config: dict[str, Any] = {}

-tls_config: dict[str, Any] = config.get("tls", {"enabled": True})
+tls_config: dict[str, any] = config.get("tls", {"enabled": True})
+networking_config = config.get("networking", {})
+ipv6_config = networking_config.get("ipv6", {"enabled": False})

-print(json.dumps(tls_config))
+output = {"tls": tls_config, "ipv6": ipv6_config}
+print(json.dumps(output))
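
The renamed script now emits one JSON object covering both listen concerns. With no `tls` or `networking` section present in the config, the defaults shown above produce output like:

```json
{"tls": {"enabled": true}, "ipv6": {"enabled": false}}
```

The nginx template and the certsync script each read their piece of this object (`.tls.enabled`, `.ipv6.enabled`).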


@@ -1,33 +1,45 @@
-# intended for internal traffic, not protected by auth
+# Internal (IPv4 always; IPv6 optional)
listen 5000;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}

-{{ if not .enabled }}
-# intended for external traffic, protected by auth
-listen 8971;
+{{ if .tls }}
+{{ if .tls.enabled }}
+# external HTTPS (IPv4 always; IPv6 optional)
+listen 8971 ssl;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
+ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
+ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
+# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
+# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
+ssl_session_timeout 1d;
+ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+ssl_session_tickets off;
+# modern configuration
+ssl_protocols TLSv1.3;
+ssl_prefer_server_ciphers off;
+# HSTS (ngx_http_headers_module is required) (63072000 seconds)
+add_header Strict-Transport-Security "max-age=63072000" always;
+# ACME challenge location
+location /.well-known/acme-challenge/ {
+    default_type "text/plain";
+    root /etc/letsencrypt/www;
+}
+{{ else }}
+# external HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+{{ end }}
{{ else }}
-# intended for external traffic, protected by auth
-listen 8971 ssl;
-ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
-ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
-# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
-# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
-ssl_session_timeout 1d;
-ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
-ssl_session_tickets off;
-# modern configuration
-ssl_protocols TLSv1.3;
-ssl_prefer_server_ciphers off;
-# HSTS (ngx_http_headers_module is required) (63072000 seconds)
-add_header Strict-Transport-Security "max-age=63072000" always;
-# ACME challenge location
-location /.well-known/acme-challenge/ {
-    default_type "text/plain";
-    root /etc/letsencrypt/www;
-}
+# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
+listen 8971;
+{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }}
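
Taken together with the config changes later in this diff, a minimal sketch of a Frigate config fragment that exercises the new IPv6 branches (keys match the reference config below):

```yaml
tls:
  enabled: True
networking:
  ipv6:
    enabled: True
```

Rendered through this template, that fragment adds `listen [::]:5000;` and `listen [::]:8971 ssl;` alongside the existing IPv4 directives.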


@@ -73,6 +73,10 @@ Fine-tune face recognition with these optional parameters at the global level of
  - Default: `100`.
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
  - Default: `True`.
+- `device`: Target a specific device to run the face recognition model on (multi-GPU installations).
+  - Default: `None`.
+  - Note: This setting is only applicable when using the `large` model. See [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/).

## Usage
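
For illustration, a minimal config sketch using the `device` option added above; the `device: 0` value mirrors the semantic search example later in this diff and assumes an onnxruntime device id on a multi-GPU host:

```yaml
face_recognition:
  enabled: True
  # device only applies to the large model
  model_size: large
  # assumption for illustration: a device id as accepted by onnxruntime
  device: 0
```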


@@ -67,9 +67,9 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
  - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
-- **`device`**: Device to use to run license plate recognition models.
+- **`device`**: Device to use to run license plate detection *and* recognition models.
  - Default: `CPU`
-  - This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
+  - This can be `CPU` or one of [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/). For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- **`model_size`**: The size of the model used to detect text on plates.
  - Default: `small`
  - This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_.
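
For illustration, a sketch of the broadened `device` values described above; `CUDAExecutionProvider` is a standard onnxruntime provider name used here as an assumed example, not a value taken from this diff:

```yaml
lpr:
  enabled: True
  # CPU (default) or an onnxruntime provider option
  device: CUDAExecutionProvider
```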


@@ -73,6 +73,12 @@ tls:
  # Optional: Enable TLS for port 8971 (default: shown below)
  enabled: True

+# Optional: IPv6 configuration
+networking:
+  # Optional: Enable IPv6 on 5000, and on 8971 if tls is configured (default: shown below)
+  ipv6:
+    enabled: False
+
# Optional: Proxy configuration
proxy:
  # Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
@@ -586,6 +592,9 @@ semantic_search:
  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"
+  # Optional: Target a specific device to run the model (default: shown below)
+  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
+  device: None

# Optional: Configuration for face recognition capability
# NOTE: enabled, min_area can be overridden at the camera level
@@ -609,6 +618,9 @@ face_recognition:
  blur_confidence_filter: True
  # Optional: Set the model size used for face recognition. (default: shown below)
  model_size: small
+  # Optional: Target a specific device to run the model (default: shown below)
+  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
+  device: None

# Optional: Configuration for license plate recognition capability
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level
@@ -616,6 +628,7 @@ lpr:
  # Optional: Enable license plate recognition (default: shown below)
  enabled: False
  # Optional: The device to run the models on (default: shown below)
+  # NOTE: See https://onnxruntime.ai/docs/execution-providers/ for more information
  device: CPU
  # Optional: Set the model size used for text detection. (default: shown below)
  model_size: small


@@ -78,17 +78,21 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings

### GPU Acceleration

-The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.
+The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.

```yaml
semantic_search:
  enabled: True
  model_size: large
+  # Optional, if using the 'large' model in a multi-GPU installation
+  device: 0
```

:::info

If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.

+Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).
+If you do not specify a device, the first available GPU will be used.

See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.


@@ -130,6 +130,11 @@ class SemanticSearchConfig(FrigateBaseModel):
    model_size: str = Field(
        default="small", title="The size of the embeddings model used."
    )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for semantic search.",
+        description="This is an override to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information.",
+    )

class TriggerConfig(FrigateBaseModel):
@@ -196,6 +201,11 @@ class FaceRecognitionConfig(FrigateBaseModel):
    blur_confidence_filter: bool = Field(
        default=True, title="Apply blur quality filter to face confidence."
    )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for face recognition.",
+        description="This is an override to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information.",
+    )

class CameraFaceRecognitionConfig(FrigateBaseModel):
@@ -209,10 +219,6 @@ class CameraFaceRecognitionConfig(FrigateBaseModel):

class LicensePlateRecognitionConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable license plate recognition.")
-    device: Optional[EnrichmentsDeviceEnum] = Field(
-        default=EnrichmentsDeviceEnum.CPU,
-        title="The device used for license plate recognition.",
-    )
    model_size: str = Field(
        default="small", title="The size of the embeddings model used."
    )
@@ -258,6 +264,11 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
        default=False,
        title="Save plates captured for LPR for debugging purposes.",
    )
+    device: Optional[str] = Field(
+        default=None,
+        title="The device key to use for LPR.",
+        description="This is an override to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information.",
+    )

class CameraLicensePlateRecognitionConfig(FrigateBaseModel):


@@ -64,6 +64,7 @@ from .database import DatabaseConfig
from .env import EnvVars
from .logger import LoggerConfig
from .mqtt import MqttConfig
+from .network import NetworkingConfig
from .proxy import ProxyConfig
from .telemetry import TelemetryConfig
from .tls import TlsConfig
@@ -334,6 +335,9 @@ class FrigateConfig(FrigateBaseModel):
    notifications: NotificationConfig = Field(
        default_factory=NotificationConfig, title="Global notification configuration."
    )
+    networking: NetworkingConfig = Field(
+        default_factory=NetworkingConfig, title="Networking configuration"
+    )
    proxy: ProxyConfig = Field(
        default_factory=ProxyConfig, title="Proxy configuration."
    )

frigate/config/network.py (new file, +13 lines)

@@ -0,0 +1,13 @@
+from pydantic import Field
+
+from .base import FrigateBaseModel
+
+__all__ = ["IPv6Config", "NetworkingConfig"]
+
+
+class IPv6Config(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable IPv6 for ports 5000 and/or 8971")
+
+
+class NetworkingConfig(FrigateBaseModel):
+    ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration")
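
A minimal usage sketch of the new models (pydantic coerces nested dicts into sub-models, so the user-facing `networking:` section maps straight onto `NetworkingConfig`):

```python
from frigate.config.network import NetworkingConfig

# parse the user-facing `networking:` section into a typed config
cfg = NetworkingConfig(ipv6={"enabled": True})
assert cfg.ipv6.enabled  # read later when rendering listen.conf
```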


@@ -269,7 +269,7 @@ class ArcFaceRecognizer(FaceRecognizer):
    def __init__(self, config: FrigateConfig):
        super().__init__(config)
        self.mean_embs: dict[int, np.ndarray] = {}
-        self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding()
+        self.face_embedder: ArcfaceEmbedding = ArcfaceEmbedding(config.face_recognition)
        self.model_builder_queue: queue.Queue | None = None

    def clear(self) -> None:


@@ -171,7 +171,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
        # don't run for non person objects
        if obj_data.get("label") != "person":
-            logger.debug("Not a processing face for non person object.")
+            logger.debug("Not processing face for a non person object.")
            return

        # don't overwrite sub label for objects that have a sub label


@@ -112,9 +112,8 @@ class Embeddings:
            self.embedding = JinaV2Embedding(
                model_size=self.config.semantic_search.model_size,
                requestor=self.requestor,
-                device="GPU"
-                if self.config.semantic_search.model_size == "large"
-                else "CPU",
+                device=config.semantic_search.device
+                or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
            )
            self.text_embedding = lambda input_data: self.embedding(
                input_data, embedding_type="text"
@@ -131,7 +130,8 @@ class Embeddings:
            self.vision_embedding = JinaV1ImageEmbedding(
                model_size=config.semantic_search.model_size,
                requestor=self.requestor,
-                device="GPU" if config.semantic_search.model_size == "large" else "CPU",
+                device=config.semantic_search.device
+                or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
            )

    def update_stats(self) -> None:
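
Both call sites now share one precedence rule: an explicit `device` from the config wins, otherwise the historical size-based default applies. A standalone sketch of that rule (function name is illustrative):

```python
def resolve_device(device: str | None, model_size: str) -> str:
    # explicit override first, then the size-based default
    return device or ("GPU" if model_size == "large" else "CPU")

assert resolve_device(None, "large") == "GPU"
assert resolve_device(None, "small") == "CPU"
assert resolve_device("CUDAExecutionProvider", "small") == "CUDAExecutionProvider"
```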


@@ -9,6 +9,7 @@ from frigate.const import MODEL_CACHE_DIR
from frigate.log import redirect_output_to_logger
from frigate.util.downloader import ModelDownloader

+from ...config import FaceRecognitionConfig
from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner
@@ -111,7 +112,7 @@ class FaceNetEmbedding(BaseEmbedding):

class ArcfaceEmbedding(BaseEmbedding):
-    def __init__(self):
+    def __init__(self, config: FaceRecognitionConfig):
        super().__init__(
            model_name="facedet",
            model_file="arcface.onnx",
@@ -119,6 +120,7 @@ class ArcfaceEmbedding(BaseEmbedding):
                "arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
            },
        )
+        self.config = config
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.tokenizer = None
        self.feature_extractor = None
@@ -148,7 +150,7 @@ class ArcfaceEmbedding(BaseEmbedding):
        self.runner = ONNXModelRunner(
            os.path.join(self.download_path, self.model_file),
-            "GPU",
+            device=self.config.device or "GPU",
        )

    def _preprocess_inputs(self, raw_inputs):


@@ -128,7 +128,6 @@ class JinaV1TextEmbedding(BaseEmbedding):
        self.runner = ONNXModelRunner(
            os.path.join(self.download_path, self.model_file),
            self.device,
-            self.model_size,
        )

    def _preprocess_inputs(self, raw_inputs):
@@ -207,7 +206,6 @@ class JinaV1ImageEmbedding(BaseEmbedding):
        self.runner = ONNXModelRunner(
            os.path.join(self.download_path, self.model_file),
            self.device,
-            self.model_size,
        )

    def _preprocess_inputs(self, raw_inputs):


@@ -128,7 +128,6 @@ class JinaV2Embedding(BaseEmbedding):
        self.runner = ONNXModelRunner(
            os.path.join(self.download_path, self.model_file),
            self.device,
-            self.model_size,
        )

    def _preprocess_image(self, image_data: bytes | Image.Image) -> np.ndarray: