mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-04-03 06:40:22 +00:00
Compare commits
79 Commits
cc368dd20f
...
1763dba9c2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1763dba9c2 | ||
|
|
ca1c98eab8 | ||
|
|
3eb3797bc5 | ||
|
|
f81ac43be6 | ||
|
|
4c8f4ef9fa | ||
|
|
4b72c86e77 | ||
|
|
d574d1edae | ||
|
|
d96efdbf2e | ||
|
|
0d78aca388 | ||
|
|
c0b2693c5b | ||
|
|
39ab6c8a10 | ||
|
|
0dd3713af8 | ||
|
|
52460a8fbb | ||
|
|
fea9901ae5 | ||
|
|
4db703cc3d | ||
|
|
b63a3d327d | ||
|
|
5b32ec4fbd | ||
|
|
12a80f5713 | ||
|
|
598520b5c2 | ||
|
|
89e5f60fab | ||
|
|
aa7c9f441f | ||
|
|
2aaf96d043 | ||
|
|
b7d705a028 | ||
|
|
68688a6c23 | ||
|
|
1fac827dde | ||
|
|
a1048654ee | ||
|
|
f75e55ee74 | ||
|
|
3bda638678 | ||
|
|
5cf98824f5 | ||
|
|
687e118b58 | ||
|
|
95daf0ba05 | ||
|
|
213dc97c17 | ||
|
|
ee1db240d7 | ||
|
|
79655379be | ||
|
|
f29cf43f52 | ||
|
|
cd54a81150 | ||
|
|
178117183e | ||
|
|
d69916694b | ||
|
|
fc608b31d7 | ||
|
|
9ffa7f140c | ||
|
|
6563f78ab0 | ||
|
|
4339f26ded | ||
|
|
86a3e31edf | ||
|
|
7238b3ea22 | ||
|
|
ea576e7468 | ||
|
|
5593495abd | ||
|
|
fd96cd5dae | ||
|
|
e0c1fea2ac | ||
|
|
f97629433d | ||
|
|
aabd5b0077 | ||
|
|
623bc72633 | ||
|
|
f3a0d519fb | ||
|
|
460e291bf1 | ||
|
|
ee51326d35 | ||
|
|
948b087d3c | ||
|
|
77589c18f4 | ||
|
|
6a62467998 | ||
|
|
6857cc2b97 | ||
|
|
37618b0f57 | ||
|
|
e7f6e069f6 | ||
|
|
ee4767b1ce | ||
|
|
6cb5cfb0c9 | ||
|
|
7cfa818e63 | ||
|
|
0764fea159 | ||
|
|
e3ed1ab8ec | ||
|
|
b01b1faa3f | ||
|
|
efbc1f836b | ||
|
|
7c33f9c579 | ||
|
|
a9255bddb5 | ||
|
|
6d80a19518 | ||
|
|
011a2dbfaf | ||
|
|
9a54c8ca49 | ||
|
|
cc99330063 | ||
|
|
7e6a241e03 | ||
|
|
2d281855fc | ||
|
|
22cc698b4e | ||
|
|
5a5a54fc66 | ||
|
|
6536368467 | ||
|
|
dc79af2d98 |
7
.github/workflows/ci.yml
vendored
7
.github/workflows/ci.yml
vendored
@ -41,6 +41,7 @@ jobs:
|
||||
target: frigate
|
||||
tags: ${{ steps.setup.outputs.image-name }}-amd64
|
||||
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
|
||||
cache-to: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
|
||||
arm64_build:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
name: ARM Build
|
||||
@ -161,8 +162,8 @@ jobs:
|
||||
files: docker/tensorrt/trt.hcl
|
||||
set: |
|
||||
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
|
||||
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
|
||||
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
|
||||
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt
|
||||
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt,mode=max
|
||||
- name: AMD/ROCm general build
|
||||
env:
|
||||
AMDGPU: gfx
|
||||
@ -176,7 +177,7 @@ jobs:
|
||||
set: |
|
||||
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
|
||||
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm,mode=max
|
||||
*.cache-from=type=gha
|
||||
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm
|
||||
arm64_extra_builds:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
name: ARM Extra Build
|
||||
|
||||
@ -39,7 +39,7 @@
|
||||
<img width="800" alt="实时监控面板" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e">
|
||||
</div>
|
||||
|
||||
### 简单的审查工作流程
|
||||
### 简单的核查工作流程
|
||||
<div>
|
||||
<img width="800" alt="简单的审查工作流程" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff">
|
||||
</div>
|
||||
@ -60,6 +60,11 @@
|
||||
|
||||
|
||||
## 非官方中文讨论社区
|
||||
欢迎加入中文讨论QQ群:1043861059
|
||||
欢迎加入中文讨论QQ群:[1043861059](https://qm.qq.com/q/7vQKsTmSz)
|
||||
|
||||
Bilibili:https://space.bilibili.com/3546894915602564
|
||||
|
||||
|
||||
## 中文社区赞助商
|
||||
[](https://edgeone.ai/zh?from=github)
|
||||
本项目 CDN 加速及安全防护由 Tencent EdgeOne 赞助
|
||||
|
||||
@ -4,7 +4,7 @@
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y build-essential cmake git wget
|
||||
|
||||
hailo_version="4.20.1"
|
||||
hailo_version="4.21.0"
|
||||
arch=$(uname -m)
|
||||
|
||||
if [[ $arch == "x86_64" ]]; then
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
hailo_version="4.20.1"
|
||||
hailo_version="4.21.0"
|
||||
|
||||
if [[ "${TARGETARCH}" == "amd64" ]]; then
|
||||
arch="x86_64"
|
||||
|
||||
@ -6,24 +6,29 @@ ARG DEBIAN_FRONTEND=noninteractive
|
||||
# Globally set pip break-system-packages option to avoid having to specify it every time
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES=1
|
||||
|
||||
FROM tensorrt-base AS frigate-tensorrt
|
||||
FROM wheels AS trt-wheels
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
ENV TRT_VER=8.6.1
|
||||
|
||||
# Install TensorRT wheels
|
||||
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
|
||||
RUN pip3 install -U -r /requirements-tensorrt.txt && ldconfig
|
||||
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
|
||||
RUN pip3 wheel --wheel-dir=/trt-wheels -c /requirements-wheels.txt -r /requirements-tensorrt.txt
|
||||
|
||||
FROM deps AS frigate-tensorrt
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
|
||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||
pip3 uninstall -y onnxruntime-openvino tensorflow-cpu \
|
||||
&& pip3 install -U /deps/trt-wheels/*.whl
|
||||
|
||||
COPY --from=rootfs / /
|
||||
COPY docker/tensorrt/detector/rootfs/etc/ld.so.conf.d /etc/ld.so.conf.d
|
||||
RUN ldconfig
|
||||
|
||||
WORKDIR /opt/frigate/
|
||||
COPY --from=rootfs / /
|
||||
|
||||
# Dev Container w/ TRT
|
||||
FROM devcontainer AS devcontainer-trt
|
||||
|
||||
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
|
||||
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
|
||||
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
|
||||
COPY docker/tensorrt/detector/rootfs/ /
|
||||
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
|
||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||
pip3 install -U /deps/trt-wheels/*.whl
|
||||
|
||||
@ -1,9 +1,61 @@
|
||||
# syntax=docker/dockerfile:1.4
|
||||
# syntax=docker/dockerfile:1.6
|
||||
|
||||
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
ARG BASE_IMAGE
|
||||
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
|
||||
|
||||
# Build TensorRT-specific library
|
||||
FROM ${TRT_BASE} AS trt-deps
|
||||
|
||||
ARG TARGETARCH
|
||||
ARG COMPUTE_LEVEL
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
|
||||
/tensorrt_libyolo.sh
|
||||
|
||||
# COPY required individual CUDA deps
|
||||
RUN mkdir -p /usr/local/cuda-deps
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \
|
||||
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \
|
||||
cd /usr/local/cuda-deps/ && \
|
||||
for lib in libnvrtc.so.*; do \
|
||||
if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \
|
||||
version="${BASH_REMATCH[1]}"; \
|
||||
ln -sf "libnvrtc.so.$version" libnvrtc.so; \
|
||||
fi; \
|
||||
done && \
|
||||
for lib in libcurand.so.*; do \
|
||||
if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \
|
||||
version="${BASH_REMATCH[1]}"; \
|
||||
ln -sf "libcurand.so.$version" libcurand.so; \
|
||||
fi; \
|
||||
done; \
|
||||
fi
|
||||
|
||||
# Frigate w/ TensorRT Support as separate image
|
||||
FROM deps AS tensorrt-base
|
||||
|
||||
#Disable S6 Global timeout
|
||||
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
|
||||
|
||||
# COPY TensorRT Model Generation Deps
|
||||
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
|
||||
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
|
||||
|
||||
# COPY Individual CUDA deps folder
|
||||
COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda
|
||||
|
||||
COPY docker/tensorrt/detector/rootfs/ /
|
||||
ENV YOLO_MODELS=""
|
||||
|
||||
HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
|
||||
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
|
||||
|
||||
FROM ${BASE_IMAGE} AS build-wheels
|
||||
ARG DEBIAN_FRONTEND
|
||||
|
||||
@ -100,4 +152,4 @@ WORKDIR /opt/frigate/
|
||||
COPY --from=rootfs / /
|
||||
|
||||
# Fixes "Error importing detector runtime: /usr/lib/aarch64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block"
|
||||
ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6
|
||||
ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6
|
||||
@ -1,57 +0,0 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
|
||||
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
|
||||
|
||||
# Build TensorRT-specific library
|
||||
FROM ${TRT_BASE} AS trt-deps
|
||||
|
||||
ARG TARGETARCH
|
||||
ARG COMPUTE_LEVEL
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
|
||||
/tensorrt_libyolo.sh
|
||||
|
||||
# COPY required individual CUDA deps
|
||||
RUN mkdir -p /usr/local/cuda-deps
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \
|
||||
cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \
|
||||
cd /usr/local/cuda-deps/ && \
|
||||
for lib in libnvrtc.so.*; do \
|
||||
if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \
|
||||
version="${BASH_REMATCH[1]}"; \
|
||||
ln -sf "libnvrtc.so.$version" libnvrtc.so; \
|
||||
fi; \
|
||||
done && \
|
||||
for lib in libcurand.so.*; do \
|
||||
if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \
|
||||
version="${BASH_REMATCH[1]}"; \
|
||||
ln -sf "libcurand.so.$version" libcurand.so; \
|
||||
fi; \
|
||||
done; \
|
||||
fi
|
||||
|
||||
# Frigate w/ TensorRT Support as separate image
|
||||
FROM deps AS tensorrt-base
|
||||
|
||||
#Disable S6 Global timeout
|
||||
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
|
||||
|
||||
# COPY TensorRT Model Generation Deps
|
||||
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
|
||||
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
|
||||
|
||||
# COPY Individual CUDA deps folder
|
||||
COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda
|
||||
|
||||
COPY docker/tensorrt/detector/rootfs/ /
|
||||
ENV YOLO_MODELS=""
|
||||
|
||||
HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
|
||||
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
|
||||
@ -1,7 +1,6 @@
|
||||
/usr/local/lib
|
||||
/usr/local/cuda
|
||||
/usr/local/lib/python3.11/dist-packages/tensorrt
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/curand/lib/
|
||||
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib/
|
||||
@ -1,17 +1,18 @@
|
||||
# NVidia TensorRT Support (amd64 only)
|
||||
--extra-index-url 'https://pypi.nvidia.com'
|
||||
numpy < 1.24; platform_machine == 'x86_64'
|
||||
tensorrt == 8.6.1; platform_machine == 'x86_64'
|
||||
tensorrt_bindings == 8.6.1; platform_machine == 'x86_64'
|
||||
cuda-python == 11.8.*; platform_machine == 'x86_64'
|
||||
cython == 3.0.*; platform_machine == 'x86_64'
|
||||
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
|
||||
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
|
||||
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
|
||||
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
|
||||
nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
|
||||
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
|
||||
nvidia-cufft-cu12==11.*; platform_machine == 'x86_64'
|
||||
cython==3.0.*; platform_machine == 'x86_64'
|
||||
nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64'
|
||||
nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64'
|
||||
nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64'
|
||||
nvidia-cufft-cu12==11.2.3.*; platform_machine == 'x86_64'
|
||||
nvidia-curand-cu12==10.3.6.*; platform_machine == 'x86_64'
|
||||
nvidia_cuda_nvcc_cu12==12.5.82; platform_machine == 'x86_64'
|
||||
nvidia-cuda-nvrtc-cu12==12.5.82; platform_machine == 'x86_64'
|
||||
nvidia_cuda_runtime_cu12==12.5.82; platform_machine == 'x86_64'
|
||||
nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
|
||||
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
|
||||
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
|
||||
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
|
||||
onnx==1.16.*; platform_machine == 'x86_64'
|
||||
onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
|
||||
protobuf==3.20.3; platform_machine == 'x86_64'
|
||||
|
||||
@ -79,21 +79,13 @@ target "trt-deps" {
|
||||
inherits = ["_build_args"]
|
||||
}
|
||||
|
||||
target "tensorrt-base" {
|
||||
dockerfile = "docker/tensorrt/Dockerfile.base"
|
||||
context = "."
|
||||
contexts = {
|
||||
deps = "target:deps",
|
||||
}
|
||||
inherits = ["_build_args"]
|
||||
}
|
||||
|
||||
target "tensorrt" {
|
||||
dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
|
||||
context = "."
|
||||
contexts = {
|
||||
wget = "target:wget",
|
||||
tensorrt-base = "target:tensorrt-base",
|
||||
wheels = "target:wheels",
|
||||
deps = "target:deps",
|
||||
rootfs = "target:rootfs"
|
||||
}
|
||||
target = "frigate-tensorrt"
|
||||
|
||||
@ -112,7 +112,7 @@ python3 -c 'import secrets; print(secrets.token_hex(64))'
|
||||
|
||||
### Header mapping
|
||||
|
||||
If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Role` values. Header names are not case sensitive. Multiple values can be included in the role header. Frigate expects that the character separating the roles is a comma, but this can be specified using the `separator` config entry.
|
||||
If you have disabled Frigate's authentication and your proxy supports passing a header with authenticated usernames and/or roles, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Groups` values. Header names are not case sensitive. Multiple values can be included in the role header. Frigate expects that the character separating the roles is a comma, but this can be specified using the `separator` config entry.
|
||||
|
||||
```yaml
|
||||
proxy:
|
||||
@ -120,7 +120,7 @@ proxy:
|
||||
separator: "|" # This value defaults to a comma, but Authentik uses a pipe, for example.
|
||||
header_map:
|
||||
user: x-forwarded-user
|
||||
role: x-forwarded-role
|
||||
role: x-forwarded-groups
|
||||
```
|
||||
|
||||
Frigate supports both `admin` and `viewer` roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.
|
||||
|
||||
@ -243,3 +243,38 @@ ffmpeg:
|
||||
### TP-Link VIGI Cameras
|
||||
|
||||
TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.
|
||||
|
||||
## USB Cameras (aka Webcams)
|
||||
|
||||
To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
|
||||
|
||||
- Preparation outside of Frigate:
|
||||
- Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0`
|
||||
- Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back.
|
||||
- If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed.
|
||||
|
||||
- In your Frigate Configuration File, add the go2rtc stream and roles as appropriate:
|
||||
|
||||
```
|
||||
go2rtc:
|
||||
streams:
|
||||
usb_camera:
|
||||
- "ffmpeg:device?video=0&video_size=1024x576#video=h264"
|
||||
|
||||
cameras:
|
||||
usb_camera:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://127.0.0.1:8554/usb_camera
|
||||
input_args: preset-rtsp-restream
|
||||
roles:
|
||||
- detect
|
||||
- record
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 1024
|
||||
height: 576
|
||||
```
|
||||
|
||||
|
||||
|
||||
@ -97,9 +97,12 @@ This list of working and non-working PTZ cameras is based on user feedback.
|
||||
| Amcrest ASH21 | ✅ | ❌ | ONVIF service port: 80 |
|
||||
| Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. |
|
||||
| Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. |
|
||||
| Annke CZ504 | ✅ | ✅ | Annke support provide specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix issue with ONVIF "TranslationSpaceFov" |
|
||||
| Ctronics PTZ | ✅ | ❌ | |
|
||||
| Dahua | ✅ | ✅ | Some low-end Dahuas (lite series, among others) have been reported to not support autotracking |
|
||||
| Dahua DH-SD2A500HB | ✅ | ❌ | |
|
||||
| Dahua DH-SD49825GB-HNR | ✅ | ✅ | |
|
||||
| Dahua DH-P5AE-PV | ❌ | ❌ | |
|
||||
| Foscam R5 | ✅ | ❌ | |
|
||||
| Hanwha XNP-6550RH | ✅ | ❌ | |
|
||||
| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others |
|
||||
|
||||
@ -67,7 +67,7 @@ Fine-tune face recognition with these optional parameters at the global level of
|
||||
- Default: `0.8`.
|
||||
- `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label.
|
||||
- Default: `0.9`.
|
||||
- `min_faces`: Min face attempts for the sub label to be applied to the person object.
|
||||
- `min_faces`: Min face recognitions for the sub label to be applied to the person object.
|
||||
- Default: `1`
|
||||
- `save_attempts`: Number of images of recognized faces to save for training.
|
||||
- Default: `100`.
|
||||
|
||||
@ -71,11 +71,11 @@ cameras:
|
||||
|
||||
Output args presets help make the config more readable and handle use cases for different types of streams to ensure consistent recordings.
|
||||
|
||||
| Preset | Usage | Other Notes |
|
||||
| -------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| preset-record-generic | Record WITHOUT audio | This is the default when nothing is specified |
|
||||
| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings |
|
||||
| preset-record-generic-audio-aac | Record WITH transcoded aac audio | Use this to transcode to aac audio. If your source is already aac, use preset-record-generic-audio-copy instead to avoid re-encoding |
|
||||
| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead |
|
||||
| preset-record-jpeg | Record live jpeg | Recommend restreaming live jpeg instead |
|
||||
| preset-record-ubiquiti | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio |
|
||||
| Preset | Usage | Other Notes |
|
||||
| -------------------------------- | --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| preset-record-generic | Record WITHOUT audio | If your camera doesn’t have audio, or if you don’t want to record audio, use this option |
|
||||
| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings |
|
||||
| preset-record-generic-audio-aac | Record WITH transcoded aac audio | This is the default when no option is specified. Use it to transcode audio to AAC. If the source is already in AAC format, use preset-record-generic-audio-copy instead to avoid unnecessary re-encoding |
|
||||
| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead |
|
||||
| preset-record-jpeg | Record live jpeg | Recommend restreaming live jpeg instead |
|
||||
| preset-record-ubiquiti | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio |
|
||||
|
||||
@ -21,12 +21,23 @@ genai:
|
||||
model: gemini-1.5-flash
|
||||
|
||||
cameras:
|
||||
front_camera: ...
|
||||
front_camera:
|
||||
genai:
|
||||
enabled: True # <- enable GenAI for your front camera
|
||||
use_snapshot: True
|
||||
objects:
|
||||
- person
|
||||
required_zones:
|
||||
- steps
|
||||
indoor_camera:
|
||||
genai: # <- disable GenAI for your indoor camera
|
||||
enabled: False
|
||||
genai:
|
||||
enabled: False # <- disable GenAI for your indoor camera
|
||||
```
|
||||
|
||||
By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
|
||||
|
||||
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
|
||||
|
||||
## Ollama
|
||||
|
||||
:::warning
|
||||
@ -185,9 +196,7 @@ genai:
|
||||
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
|
||||
```
|
||||
|
||||
Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
|
||||
|
||||
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
|
||||
Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
|
||||
|
||||
```yaml
|
||||
cameras:
|
||||
|
||||
@ -176,16 +176,26 @@ For more information on the various values across different distributions, see h
|
||||
|
||||
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
|
||||
|
||||
#### Stats for SR-IOV devices
|
||||
#### Stats for SR-IOV or other devices
|
||||
|
||||
When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
|
||||
When using virtualized GPUs via SR-IOV, you need to specify the device path to use to gather stats from `intel_gpu_top`. This example may work for some systems using SR-IOV:
|
||||
|
||||
```yaml
|
||||
telemetry:
|
||||
stats:
|
||||
sriov: True
|
||||
intel_gpu_device: "sriov"
|
||||
```
|
||||
|
||||
For other virtualized GPUs, try specifying the direct path to the device instead:
|
||||
|
||||
```yaml
|
||||
telemetry:
|
||||
stats:
|
||||
intel_gpu_device: "drm:/dev/dri/card0"
|
||||
```
|
||||
|
||||
If you are passing in a device path, make sure you've passed the device through to the container.
|
||||
|
||||
## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
|
||||
|
||||
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
|
||||
|
||||
@ -172,7 +172,7 @@ For devices that support two way talk, Frigate can be configured to use the feat
|
||||
|
||||
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
|
||||
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
|
||||
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
|
||||
- For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source.
|
||||
|
||||
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
|
||||
|
||||
|
||||
@ -24,10 +24,13 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
|
||||
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
|
||||
|
||||
**Nvidia**
|
||||
**Nvidia GPU**
|
||||
|
||||
- [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
|
||||
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp6` Frigate images when a supported ONNX model is configured.
|
||||
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
|
||||
|
||||
**Nvidia Jetson**
|
||||
- [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models.
|
||||
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured.
|
||||
|
||||
**Rockchip**
|
||||
|
||||
@ -399,111 +402,6 @@ model:
|
||||
|
||||
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
|
||||
|
||||
## NVidia TensorRT Detector
|
||||
|
||||
Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection.
|
||||
|
||||
### Minimum Hardware Support
|
||||
|
||||
The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
|
||||
|
||||
To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
|
||||
|
||||
There are improved capabilities in newer GPU architectures that TensorRT can benefit from, such as INT8 operations and Tensor cores. The features compatible with your hardware will be optimized when the model is converted to a trt file. Currently the script provided for generating the model provides a switch to enable/disable FP16 operations. If you wish to use newer features such as INT8 optimization, more work is required.
|
||||
|
||||
#### Compatibility References:
|
||||
|
||||
[NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-841/support-matrix/index.html)
|
||||
|
||||
[NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html)
|
||||
|
||||
[NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus)
|
||||
|
||||
### Generate Models
|
||||
|
||||
Models used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
|
||||
|
||||
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
|
||||
|
||||
By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
|
||||
|
||||
If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
|
||||
|
||||
If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.
|
||||
|
||||
Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
|
||||
|
||||
<details>
|
||||
<summary>Available Models</summary>
|
||||
```
|
||||
yolov3-288
|
||||
yolov3-416
|
||||
yolov3-608
|
||||
yolov3-spp-288
|
||||
yolov3-spp-416
|
||||
yolov3-spp-608
|
||||
yolov3-tiny-288
|
||||
yolov3-tiny-416
|
||||
yolov4-288
|
||||
yolov4-416
|
||||
yolov4-608
|
||||
yolov4-csp-256
|
||||
yolov4-csp-512
|
||||
yolov4-p5-448
|
||||
yolov4-p5-896
|
||||
yolov4-tiny-288
|
||||
yolov4-tiny-416
|
||||
yolov4x-mish-320
|
||||
yolov4x-mish-640
|
||||
yolov7-tiny-288
|
||||
yolov7-tiny-416
|
||||
yolov7-640
|
||||
yolov7-416
|
||||
yolov7-320
|
||||
yolov7x-640
|
||||
yolov7x-320
|
||||
```
|
||||
</details>
|
||||
|
||||
An example `docker-compose.yml` fragment that converts the `yolov7-320` and `yolov7x-640` models for a Pascal card would look something like this:
|
||||
|
||||
```yml
|
||||
frigate:
|
||||
environment:
|
||||
- YOLO_MODELS=yolov7-320,yolov7x-640
|
||||
- USE_FP16=false
|
||||
```
|
||||
|
||||
If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU.
|
||||
|
||||
```yml
|
||||
frigate:
|
||||
environment:
|
||||
- TRT_MODEL_PREP_DEVICE=0 # Optionally, select which GPU is used for model optimization
|
||||
```
|
||||
|
||||
### Configuration Parameters
|
||||
|
||||
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration_video.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
|
||||
|
||||
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
|
||||
|
||||
Use the config below to work with generated TRT models:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
tensorrt:
|
||||
type: tensorrt
|
||||
device: 0 #This is the default, select the first GPU
|
||||
|
||||
model:
|
||||
path: /config/model_cache/tensorrt/yolov7-320.trt
|
||||
input_tensor: nchw
|
||||
input_pixel_format: rgb
|
||||
width: 320
|
||||
height: 320
|
||||
```
|
||||
|
||||
## AMD/ROCm GPU detector
|
||||
|
||||
### Setup
|
||||
@ -801,6 +699,88 @@ To verify that the integration is working correctly, start Frigate and observe t
|
||||
|
||||
# Community Supported Detectors
|
||||
|
||||
## NVidia TensorRT Detector
|
||||
|
||||
Nvidia Jetson devices may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt-jp6` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6`. This detector is designed to work with Yolo models for object detection.
|
||||
|
||||
### Generate Models
|
||||
|
||||
Models used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
|
||||
|
||||
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
|
||||
|
||||
By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
|
||||
|
||||
If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
|
||||
|
||||
If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.
|
||||
|
||||
Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
|
||||
|
||||
<details>
|
||||
<summary>Available Models</summary>
|
||||
```
|
||||
yolov3-288
|
||||
yolov3-416
|
||||
yolov3-608
|
||||
yolov3-spp-288
|
||||
yolov3-spp-416
|
||||
yolov3-spp-608
|
||||
yolov3-tiny-288
|
||||
yolov3-tiny-416
|
||||
yolov4-288
|
||||
yolov4-416
|
||||
yolov4-608
|
||||
yolov4-csp-256
|
||||
yolov4-csp-512
|
||||
yolov4-p5-448
|
||||
yolov4-p5-896
|
||||
yolov4-tiny-288
|
||||
yolov4-tiny-416
|
||||
yolov4x-mish-320
|
||||
yolov4x-mish-640
|
||||
yolov7-tiny-288
|
||||
yolov7-tiny-416
|
||||
yolov7-640
|
||||
yolov7-416
|
||||
yolov7-320
|
||||
yolov7x-640
|
||||
yolov7x-320
|
||||
```
|
||||
</details>
|
||||
|
||||
An example `docker-compose.yml` fragment that converts the `yolov7-320` and `yolov7x-640` models would look something like this:
|
||||
|
||||
```yml
|
||||
frigate:
|
||||
environment:
|
||||
- YOLO_MODELS=yolov7-320,yolov7x-640
|
||||
- USE_FP16=false
|
||||
```
|
||||
|
||||
### Configuration Parameters
|
||||
|
||||
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration_video.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
|
||||
|
||||
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
|
||||
|
||||
Use the config below to work with generated TRT models:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
tensorrt:
|
||||
type: tensorrt
|
||||
device: 0 #This is the default, select the first GPU
|
||||
|
||||
model:
|
||||
path: /config/model_cache/tensorrt/yolov7-320.trt
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
input_tensor: nchw
|
||||
input_pixel_format: rgb
|
||||
width: 320
|
||||
height: 320
|
||||
```
|
||||
|
||||
## Rockchip platform
|
||||
|
||||
Hardware accelerated object detection is supported on the following SoCs:
|
||||
|
||||
@ -20,5 +20,5 @@ In order to install Frigate as a PWA, the following requirements must be met:
|
||||
Installation varies slightly based on the device that is being used:
|
||||
|
||||
- Desktop: Use the install button typically found in the right edge of the address bar
|
||||
- Android: Use the `Install as App` button in the more options menu
|
||||
- iOS: Use the `Add to Homescreen` button in the share menu
|
||||
- Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox
|
||||
- iOS: Use the `Add to Homescreen` button in the share menu
|
||||
|
||||
@ -561,7 +561,7 @@ face_recognition:
|
||||
recognition_threshold: 0.9
|
||||
# Optional: Min area of detected face box to consider running face recognition (default: shown below)
|
||||
min_area: 500
|
||||
# Optional: Min face attempts for the sub label to be applied to the person object (default: shown below)
|
||||
# Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below)
|
||||
min_faces: 1
|
||||
# Optional: Number of images of recognized faces to save for training (default: shown below)
|
||||
save_attempts: 100
|
||||
@ -903,7 +903,7 @@ telemetry:
|
||||
# Optional: Enable Intel GPU stats (default: shown below)
|
||||
intel_gpu_stats: True
|
||||
# Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
|
||||
sriov: False
|
||||
intel_gpu_device: None
|
||||
# Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
|
||||
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
|
||||
network_bandwidth: False
|
||||
|
||||
@ -21,6 +21,21 @@ In 0.14 and later, all of that is bundled into a single review item which starts
|
||||
|
||||
Not every segment of video captured by Frigate may be of the same level of interest to you. Video of people who enter your property may be a different priority than those walking by on the sidewalk. For this reason, Frigate 0.14 categorizes review items as _alerts_ and _detections_. By default, all person and car objects are considered alerts. You can refine categorization of your review items by configuring required zones for them.
|
||||
|
||||
:::note
|
||||
|
||||
Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add the following to your config:
|
||||
|
||||
```yaml
|
||||
objects:
|
||||
track:
|
||||
- person
|
||||
- car
|
||||
- ...
|
||||
```
|
||||
|
||||
See the [objects documentation](objects.md) for the list of objects that Frigate's default model tracks.
|
||||
:::
|
||||
|
||||
## Restricting alerts to specific labels
|
||||
|
||||
By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config:
|
||||
|
||||
@ -36,8 +36,8 @@ Note that certbot uses symlinks, and those can't be followed by the container un
|
||||
frigate:
|
||||
...
|
||||
volumes:
|
||||
- /etc/letsencrypt/live/frigate:/etc/letsencrypt/live/frigate:ro
|
||||
- /etc/letsencrypt/archive/frigate:/etc/letsencrypt/archive/frigate:ro
|
||||
- /etc/letsencrypt/live/your.fqdn.net:/etc/letsencrypt/live/frigate:ro
|
||||
- /etc/letsencrypt/archive/your.fqdn.net:/etc/letsencrypt/archive/your.fqdn.net:ro
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
@ -3,7 +3,7 @@ id: camera_setup
|
||||
title: Camera setup
|
||||
---
|
||||
|
||||
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
|
||||
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Firefox 134+/136+/137+ (Windows/Mac/Linux & Android), Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
|
||||
|
||||
- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher (10fps is the recommended maximum for most users) for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server.
|
||||
|
||||
|
||||
@ -66,4 +66,4 @@ The time period starting when a tracked object entered the frame and ending when
|
||||
|
||||
## Zone
|
||||
|
||||
Zones are areas of interest, zones can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones)
|
||||
Zones are areas of interest; they can be used for notifications and for limiting the areas where Frigate will create a [review item](#review-item). [See the zone docs for more info](/configuration/zones)
|
||||
|
||||
@ -9,23 +9,36 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility
|
||||
|
||||
I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options.
|
||||
|
||||
Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data.
|
||||
WiFi cameras are not recommended as [their streams are less reliable and cause connection loss and/or lost video data](https://ipcamtalk.com/threads/camera-conflicts.68142/#post-738821), especially when more than a few WiFi cameras will be used at the same time.
|
||||
|
||||
Here are some of the cameras I recommend:
|
||||
Many users have reported various issues with 4K-plus Reolink cameras, it is best to stick with 5MP and lower for Reolink cameras. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras).
|
||||
|
||||
Here are some of the cameras I recommend:
|
||||
|
||||
- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
|
||||
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
|
||||
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
|
||||
- <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
|
||||
|
||||
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
|
||||
## Server
|
||||
|
||||
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Hailo8 or Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral, Hailo, or other AI accelerators.
|
||||
|
||||
| Name | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
Note that many of these mini PCs come with Windows pre-installed, and you will need to install Linux according to the [getting started guide](../guides/getting_started.md).
|
||||
|
||||
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
|
||||
:::warning
|
||||
|
||||
If the EQ13 is out of stock, the link below may take you to a suggested alternative on Amazon. The Beelink EQ14 has some known compatibility issues, so you should avoid that model for now.
|
||||
|
||||
:::
|
||||
|
||||
| Name | Coral Inference Speed | Coral Compatibility | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
|
||||
## Detectors
|
||||
|
||||
@ -123,7 +136,23 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
|
||||
|
||||
### TensorRT - Nvidia GPU
|
||||
|
||||
The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
|
||||
Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA libraries.
|
||||
|
||||
### Minimum Hardware Support
|
||||
|
||||
12.x series of CUDA libraries are used which have minor version compatibility. The minimum driver version on the host system must be `>=545`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
|
||||
|
||||
Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
|
||||
|
||||
There are improved capabilities in newer GPU architectures that TensorRT can benefit from, such as INT8 operations and Tensor cores. The features compatible with your hardware will be optimized when the model is converted to a trt file. Currently the script provided for generating the model provides a switch to enable/disable FP16 operations. If you wish to use newer features such as INT8 optimization, more work is required.
|
||||
|
||||
#### Compatibility References:
|
||||
|
||||
[NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-841/support-matrix/index.html)
|
||||
|
||||
[NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html)
|
||||
|
||||
[NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus)
|
||||
|
||||
Inference speeds will vary greatly depending on the GPU and the model used.
|
||||
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
|
||||
@ -193,4 +222,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream
|
||||
|
||||
YES! The Coral does not help with decoding video streams.
|
||||
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://blog.video.ibm.com/streaming-video-tips/keyframes-interframe-video-compression/). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
|
||||
119
docs/docs/frigate/updating.md
Normal file
119
docs/docs/frigate/updating.md
Normal file
@ -0,0 +1,119 @@
|
||||
---
|
||||
id: updating
|
||||
title: Updating
|
||||
---
|
||||
|
||||
# Updating Frigate
|
||||
|
||||
The current stable version of Frigate is **0.15.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.15.0).
|
||||
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- **Stop Frigate**: For most methods, you’ll need to stop the running Frigate instance before backing up and updating.
|
||||
- **Backup Your Configuration**: Always back up your `/config` directory (e.g., `config.yml` and `frigate.db`, the SQLite database) before updating. This ensures you can roll back if something goes wrong.
|
||||
- **Check Release Notes**: Carefully review the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases) for breaking changes or configuration updates that might affect your setup.
|
||||
|
||||
## Updating with Docker
|
||||
|
||||
If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
|
||||
1. **Stop the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose down frigate
|
||||
```
|
||||
- If using `docker run`:
|
||||
```bash
|
||||
docker stop frigate
|
||||
```
|
||||
|
||||
2. **Update and Pull the Latest Image**:
|
||||
|
||||
- If using Docker Compose:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.15.0` instead of `0.14.1`). For example:
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
image: ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
- Then pull the image:
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
|
||||
- If using `docker run`:
|
||||
- Pull the image with the appropriate tag (e.g., `0.15.0`, `0.15.0-tensorrt`, or `stable`):
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
|
||||
3. **Start the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
- If using `docker run`, re-run your original command (e.g., from the [Installation](./installation.md#docker) section) with the updated image tag.
|
||||
|
||||
4. **Verify the Update**:
|
||||
- Check the container logs to ensure Frigate starts successfully:
|
||||
```bash
|
||||
docker logs frigate
|
||||
```
|
||||
- Visit the Frigate Web UI (default: `http://<your-ip>:5000`) to confirm the new version is running. The version number is displayed at the top of the System Metrics page.
|
||||
|
||||
### Notes
|
||||
|
||||
- If you’ve customized other settings (e.g., `shm-size`), ensure they’re still appropriate after the update.
|
||||
- Docker will automatically use the updated image when you restart the container, as long as you pulled the correct version.
|
||||
|
||||
## Updating the Home Assistant Addon
|
||||
|
||||
For users running Frigate as a Home Assistant Addon:
|
||||
|
||||
1. **Check for Updates**:
|
||||
|
||||
- Navigate to **Settings > Add-ons** in Home Assistant.
|
||||
- Find your installed Frigate addon (e.g., "Frigate NVR" or "Frigate NVR (Full Access)").
|
||||
- If an update is available, you’ll see an "Update" button.
|
||||
|
||||
2. **Update the Addon**:
|
||||
|
||||
- Click the "Update" button next to the Frigate addon.
|
||||
- Wait for the process to complete. Home Assistant will handle downloading and installing the new version.
|
||||
|
||||
3. **Restart the Addon**:
|
||||
|
||||
- After updating, go to the addon’s page and click "Restart" to apply the changes.
|
||||
|
||||
4. **Verify the Update**:
|
||||
- Check the addon logs (under the "Log" tab) to ensure Frigate starts without errors.
|
||||
- Access the Frigate Web UI to confirm the new version is running.
|
||||
|
||||
### Notes
|
||||
|
||||
- Ensure your `/config/frigate.yml` is compatible with the new version by reviewing the [Release notes](https://github.com/blakeblackshear/frigate/releases).
|
||||
- If using custom hardware (e.g., Coral or GPU), verify that configurations still work, as addon updates don’t modify your hardware settings.
|
||||
|
||||
## Rolling Back
|
||||
|
||||
If an update causes issues:
|
||||
|
||||
1. Stop Frigate.
|
||||
2. Restore your backed-up config file and database.
|
||||
3. Revert to the previous image version:
|
||||
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`) in your `docker run` command.
|
||||
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`), and re-run `docker compose up -d`.
|
||||
- For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
|
||||
4. Verify the old version is running again.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **Container Fails to Start**: Check logs (`docker logs frigate`) for errors.
|
||||
- **UI Not Loading**: Ensure ports (e.g., 5000, 8971) are still mapped correctly and the service is running.
|
||||
- **Hardware Issues**: Revisit hardware-specific setup (e.g., Coral, GPU) if detection or decoding fails post-update.
|
||||
|
||||
Common questions are often answered in the [FAQ](https://github.com/blakeblackshear/frigate/discussions), pinned at the top of the support discussions.
|
||||
@ -35,6 +35,7 @@ There are many solutions available to implement reverse proxies and the communit
|
||||
* [Apache2](#apache2-reverse-proxy)
|
||||
* [Nginx](#nginx-reverse-proxy)
|
||||
* [Traefik](#traefik-reverse-proxy)
|
||||
* [Caddy](#caddy-reverse-proxy)
|
||||
|
||||
## Apache2 Reverse Proxy
|
||||
|
||||
@ -117,7 +118,8 @@ server {
|
||||
set $port 8971;
|
||||
|
||||
listen 80;
|
||||
listen 443 ssl http2;
|
||||
listen 443 ssl;
|
||||
http2 on;
|
||||
|
||||
server_name frigate.domain.com;
|
||||
}
|
||||
@ -177,3 +179,33 @@ The above configuration will create a "service" in Traefik, automatically adding
|
||||
It will also add a router, routing requests to "traefik.example.com" to your local container.
|
||||
|
||||
Note that with this approach, you don't need to expose any ports for the Frigate instance since all traffic will be routed over the internal Docker network.
|
||||
|
||||
## Caddy Reverse Proxy
|
||||
|
||||
This example shows Frigate running under a subdomain with logging and a tls cert (in this case a wildcard domain cert obtained independently of caddy) handled via imports
|
||||
|
||||
```caddy
|
||||
(logging) {
|
||||
log {
|
||||
output file /var/log/caddy/{args[0]}.log {
|
||||
roll_size 10MiB
|
||||
roll_keep 5
|
||||
roll_keep_for 10d
|
||||
}
|
||||
format json
|
||||
level INFO
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
(tls) {
|
||||
tls /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.fullchain.pem /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.privkey.pem
|
||||
}
|
||||
|
||||
frigate.YOUR_DOMAIN.TLD {
|
||||
reverse_proxy http://localhost:8971
|
||||
import tls
|
||||
import logging frigate.YOUR_DOMAIN.TLD
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
@ -110,6 +110,14 @@ If you run Frigate on a separate device within your local network, Home Assistan
|
||||
|
||||
Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required.
|
||||
|
||||
:::tip
|
||||
|
||||
The above URL assumes you have [disabled TLS](../configuration/tls).
|
||||
By default, TLS is enabled and Frigate will be using a self-signed certificate. HomeAssistant will fail to connect HTTPS to port 8971 since it fails to verify the self-signed certificate.
|
||||
Either disable TLS and use HTTP from HomeAssistant, or configure Frigate to be accessible with a valid certificate.
|
||||
|
||||
:::
|
||||
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
|
||||
@ -28,7 +28,14 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123961.837752,
|
||||
"snapshot_time": 1607123961.837752,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": null,
|
||||
"top_score": 0.958984375,
|
||||
@ -62,7 +69,14 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123962.082975,
|
||||
"snapshot_time": 1607123961.837752,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": ["John Smith", 0.79],
|
||||
"top_score": 0.958984375,
|
||||
|
||||
@ -43,7 +43,7 @@ Snapshots must be enabled to be able to submit examples to Frigate+
|
||||
|
||||
### Annotate and verify
|
||||
|
||||
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [improving your model](../plus/improving_model.md).
|
||||
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [annotating](../plus/annotating.md).
|
||||
|
||||

|
||||
|
||||
|
||||
@ -13,6 +13,10 @@ Please use your own knowledge to assess and vet them before you install anything
|
||||
|
||||
:::
|
||||
|
||||
## [Advanced Camera Card (formerly known as Frigate Card)](https://card.camera/#/README)
|
||||
|
||||
The [Advanced Camera Card](https://card.camera/#/README) is a Home Assistant dashboard card with deep Frigate integration.
|
||||
|
||||
## [Double Take](https://github.com/skrashevich/double-take)
|
||||
|
||||
[Double Take](https://github.com/skrashevich/double-take) provides a unified UI and API for processing and training images for facial recognition.
|
||||
|
||||
@ -1,17 +1,9 @@
|
||||
---
|
||||
id: improving_model
|
||||
title: Improving your model
|
||||
id: annotating
|
||||
title: Annotating your images
|
||||
---
|
||||
|
||||
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
|
||||
|
||||
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
|
||||
- **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
|
||||
|
||||
## Properly labeling images
|
||||
|
||||
For the best results, follow the following guidelines.
|
||||
For the best results, follow these guidelines. You may also want to review the documentation on [improving your model](./index.md#improving-your-model).
|
||||
|
||||
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused. You can exclude labels that you don't want detected on any of your cameras.
|
||||
|
||||
@ -25,9 +17,17 @@ For the best results, follow the following guidelines.
|
||||
|
||||

|
||||
|
||||
## AI suggested labels
|
||||
|
||||
If you have an active Frigate+ subscription, new uploads will be scanned for the objects configured for your camera and you will see suggested labels as light blue boxes when annotating in Frigate+. These suggestions are processed via a queue and typically complete within a minute after uploading, but processing times can be longer.
|
||||
|
||||

|
||||
|
||||
Suggestions are converted to labels when saving, so you should remove any errant suggestions. There is already some logic designed to avoid duplicate labels, but you may still occasionally see some duplicate suggestions. You should keep the most accurate bounding box and delete any duplicates so that you have just one label per object remaining.
|
||||
|
||||
## False positive labels
|
||||
|
||||
False positives will be shown with a read box and the label will have a strike through.
|
||||
False positives will be shown with a red box and the label will have a strike through. These can't be adjusted, but they can be deleted if you accidentally submit a true positive as a false positive from Frigate.
|
||||

|
||||
|
||||
Misidentified objects should have a correct label added. For example, if a person was mistakenly detected as a cat, you should submit it as a false positive in Frigate and add a label for the person. The boxes will overlap.
|
||||
@ -9,11 +9,11 @@ Before requesting your first model, you will need to upload and verify at least
|
||||
|
||||
It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
|
||||
For more detailed recommendations, you can refer to the docs on [improving your model](./improving_model.md).
|
||||
For more detailed recommendations, you can refer to the docs on [annotating](./annotating.md).
|
||||
|
||||
## Step 2: Submit a model request
|
||||
|
||||
Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
|
||||
Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). If you are unsure which type to request, you can test the base model for each version from the "Base Models" tab. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
|
||||

|
||||
|
||||
## Step 3: Set your model id in the config
|
||||
|
||||
@ -3,15 +3,9 @@ id: index
|
||||
title: Models
|
||||
---
|
||||
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a base model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
|
||||
:::info
|
||||
|
||||
The baseline model isn't directly available after subscribing. This may change in the future, but for now you will need to submit a model request with the minimum number of images.
|
||||
|
||||
:::
|
||||
|
||||
With a subscription, 12 model trainings per year are included. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional trainings.
|
||||
With a subscription, 12 model trainings to fine tune your model per year are included. In addition, you will have access to any base models published while your subscription is active. If you cancel your subscription, you will retain access to any trained and base models in your account. An active subscription is required to submit model requests or purchase additional trainings. New base models are published quarterly with target dates of January 15th, April 15th, July 15th, and October 15th.
|
||||
|
||||
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
|
||||
|
||||
@ -19,7 +13,7 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ
|
||||
|
||||
There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
|
||||
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
|
||||
|
||||
| Model Type | Description |
|
||||
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
@ -36,19 +30,27 @@ Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later.
|
||||
|
||||
:::
|
||||
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
|
||||
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
|
||||
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `onnx` | `yolonas` |
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
|
||||
| [NVidia GPU](/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
|
||||
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
|
||||
|
||||
_\* Requires Frigate 0.15_
|
||||
|
||||
## Improving your model
|
||||
|
||||
Some users may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
|
||||
|
||||
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
|
||||
- **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
|
||||
|
||||
## Available label types
|
||||
|
||||
Frigate+ models support a more relevant set of objects for security cameras. Currently, the following objects are supported:
|
||||
Frigate+ models support a more relevant set of objects for security cameras. The labels for annotation in Frigate+ are configurable by editing the camera in the Cameras section of Frigate+. Currently, the following objects are supported:
|
||||
|
||||
- **People**: `person`, `face`
|
||||
- **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `license_plate`
|
||||
@ -58,6 +60,16 @@ Frigate+ models support a more relevant set of objects for security cameras. Cur
|
||||
|
||||
Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
|
||||
|
||||
### Candidate labels
|
||||
|
||||
Candidate labels are also available for annotation. These labels don't have enough data to be included in the model yet, but using them will help add support sooner. You can enable these labels by editing the camera settings.
|
||||
|
||||
Where possible, these labels are mapped to existing labels during training. For example, any `baby` labels are mapped to `person` until support for new labels is added.
|
||||
|
||||
The candidate labels are: `baby`, `royal mail`, `canada post`, `bpost`, `skunk`, `badger`, `possum`, `rodent`, `kangaroo`, `chicken`, `groundhog`, `boar`, `hedgehog`, `school bus`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`
|
||||
|
||||
Candidate labels are not available for automatic suggestions.
|
||||
|
||||
### Label attributes
|
||||
|
||||
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
|
||||
|
||||
@ -46,6 +46,17 @@ Some users have reported that this older device runs an older kernel causing iss
|
||||
6. Open the control panel - info screen. The coral TPU will now be recognised as a USB Device - google inc
|
||||
7. Start the frigate container. Everything should work now!
|
||||
|
||||
### QNAP NAS
|
||||
|
||||
QNAP NAS devices, such as the TS-253A, may use connected Coral TPU devices if [QuMagie](https://www.qnap.com/en/software/qumagie) is installed along with its QNAP AI Core extension. If any of the features—`facial recognition`, `object recognition`, or `similar photo recognition`—are enabled, Container Station applications such as `Frigate` or `CodeProject.AI Server` will be unable to initialize the TPU device in use.
|
||||
To allow the Coral TPU device to be discovered, you must either:
|
||||
|
||||
1. [Disable the AI recognition features in QuMagie](https://docs.qnap.com/application/qumagie/2.x/en-us/configuring-qnap-ai-core-settings-FB13CE03.html),
|
||||
2. Remove the QNAP AI Core extension or
|
||||
3. Manually start the QNAP AI Core extension after Frigate has fully started (not recommended).
|
||||
|
||||
It is also recommended to restart the NAS once the changes have been made.
|
||||
|
||||
## USB Coral Detection Appears to be Stuck
|
||||
|
||||
The USB Coral can become stuck and need to be restarted, this can happen for a number of reasons depending on hardware and software setup. Some common reasons are:
|
||||
|
||||
@ -1,19 +1,24 @@
|
||||
import type * as Preset from '@docusaurus/preset-classic';
|
||||
import * as path from 'node:path';
|
||||
import type { Config, PluginConfig } from '@docusaurus/types';
|
||||
import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs';
|
||||
import type * as Preset from "@docusaurus/preset-classic";
|
||||
import * as path from "node:path";
|
||||
import type { Config, PluginConfig } from "@docusaurus/types";
|
||||
import type * as OpenApiPlugin from "docusaurus-plugin-openapi-docs";
|
||||
|
||||
const config: Config = {
|
||||
title: 'Frigate',
|
||||
tagline: 'NVR With Realtime Object Detection for IP Cameras',
|
||||
url: 'https://docs.frigate.video',
|
||||
baseUrl: '/',
|
||||
onBrokenLinks: 'throw',
|
||||
onBrokenMarkdownLinks: 'warn',
|
||||
favicon: 'img/favicon.ico',
|
||||
organizationName: 'blakeblackshear',
|
||||
projectName: 'frigate',
|
||||
themes: ['@docusaurus/theme-mermaid', 'docusaurus-theme-openapi-docs'],
|
||||
title: "Frigate",
|
||||
tagline: "NVR With Realtime Object Detection for IP Cameras",
|
||||
url: "https://docs.frigate.video",
|
||||
baseUrl: "/",
|
||||
onBrokenLinks: "throw",
|
||||
onBrokenMarkdownLinks: "warn",
|
||||
favicon: "img/favicon.ico",
|
||||
organizationName: "blakeblackshear",
|
||||
projectName: "frigate",
|
||||
themes: [
|
||||
"@docusaurus/theme-mermaid",
|
||||
"docusaurus-theme-openapi-docs",
|
||||
"@inkeep/docusaurus/chatButton",
|
||||
"@inkeep/docusaurus/searchBar",
|
||||
],
|
||||
markdown: {
|
||||
mermaid: true,
|
||||
},
|
||||
@ -27,39 +32,79 @@ const config: Config = {
|
||||
},
|
||||
},
|
||||
themeConfig: {
|
||||
algolia: {
|
||||
appId: 'WIURGBNBPY',
|
||||
apiKey: 'd02cc0a6a61178b25da550212925226b',
|
||||
indexName: 'frigate',
|
||||
announcementBar: {
|
||||
id: 'frigate_plus',
|
||||
content: `
|
||||
<span style="margin-right: 8px; display: inline-block; animation: pulse 2s infinite;">🚀</span>
|
||||
Get more relevant and accurate detections with Frigate+ models.
|
||||
<a style="margin-left: 12px; padding: 3px 10px; background: #94d2bd; color: #001219; text-decoration: none; border-radius: 4px; font-weight: 500; " target="_blank" rel="noopener noreferrer" href="https://frigate.video/plus/">Learn more</a>
|
||||
<span style="margin-left: 8px; display: inline-block; animation: pulse 2s infinite;">✨</span>
|
||||
<style>
|
||||
@keyframes pulse {
|
||||
0%, 100% { transform: scale(1); }
|
||||
50% { transform: scale(1.1); }
|
||||
}
|
||||
</style>`,
|
||||
backgroundColor: '#005f73',
|
||||
textColor: '#e0fbfc',
|
||||
isCloseable: false,
|
||||
},
|
||||
docs: {
|
||||
sidebar: {
|
||||
hideable: true,
|
||||
},
|
||||
},
|
||||
inkeepConfig: {
|
||||
baseSettings: {
|
||||
apiKey: "b1a4c4d73c9b48aa5b3cdae6e4c81f0bb3d1134eeb5a7100",
|
||||
integrationId: "cm6xmhn9h000gs601495fkkdx",
|
||||
organizationId: "org_map2JQEOco8U1ZYY",
|
||||
primaryBrandColor: "#010101",
|
||||
},
|
||||
aiChatSettings: {
|
||||
chatSubjectName: "Frigate",
|
||||
botAvatarSrcUrl: "https://frigate.video/images/favicon.png",
|
||||
getHelpCallToActions: [
|
||||
{
|
||||
name: "GitHub",
|
||||
url: "https://github.com/blakeblackshear/frigate",
|
||||
icon: {
|
||||
builtIn: "FaGithub",
|
||||
},
|
||||
},
|
||||
],
|
||||
quickQuestions: [
|
||||
"How to configure and setup camera settings?",
|
||||
"How to setup notifications?",
|
||||
"Supported builtin detectors?",
|
||||
"How to restream video feed?",
|
||||
"How can I get sound or audio in my recordings?",
|
||||
],
|
||||
},
|
||||
},
|
||||
prism: {
|
||||
additionalLanguages: ['bash', 'json'],
|
||||
additionalLanguages: ["bash", "json"],
|
||||
},
|
||||
languageTabs: [
|
||||
{
|
||||
highlight: 'python',
|
||||
language: 'python',
|
||||
logoClass: 'python',
|
||||
highlight: "python",
|
||||
language: "python",
|
||||
logoClass: "python",
|
||||
},
|
||||
{
|
||||
highlight: 'javascript',
|
||||
language: 'nodejs',
|
||||
logoClass: 'nodejs',
|
||||
highlight: "javascript",
|
||||
language: "nodejs",
|
||||
logoClass: "nodejs",
|
||||
},
|
||||
{
|
||||
highlight: 'javascript',
|
||||
language: 'javascript',
|
||||
logoClass: 'javascript',
|
||||
highlight: "javascript",
|
||||
language: "javascript",
|
||||
logoClass: "javascript",
|
||||
},
|
||||
{
|
||||
highlight: 'bash',
|
||||
language: 'curl',
|
||||
logoClass: 'curl',
|
||||
highlight: "bash",
|
||||
language: "curl",
|
||||
logoClass: "curl",
|
||||
},
|
||||
{
|
||||
highlight: "rust",
|
||||
@ -68,28 +113,28 @@ const config: Config = {
|
||||
},
|
||||
],
|
||||
navbar: {
|
||||
title: 'Frigate',
|
||||
title: "Frigate",
|
||||
logo: {
|
||||
alt: 'Frigate',
|
||||
src: 'img/logo.svg',
|
||||
srcDark: 'img/logo-dark.svg',
|
||||
alt: "Frigate",
|
||||
src: "img/logo.svg",
|
||||
srcDark: "img/logo-dark.svg",
|
||||
},
|
||||
items: [
|
||||
{
|
||||
to: '/',
|
||||
activeBasePath: 'docs',
|
||||
label: 'Docs',
|
||||
position: 'left',
|
||||
to: "/",
|
||||
activeBasePath: "docs",
|
||||
label: "Docs",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
href: 'https://frigate.video',
|
||||
label: 'Website',
|
||||
position: 'right',
|
||||
href: "https://frigate.video",
|
||||
label: "Website",
|
||||
position: "right",
|
||||
},
|
||||
{
|
||||
href: 'http://demo.frigate.video',
|
||||
label: 'Demo',
|
||||
position: 'right',
|
||||
href: "http://demo.frigate.video",
|
||||
label: "Demo",
|
||||
position: "right",
|
||||
},
|
||||
{
|
||||
type: 'localeDropdown',
|
||||
@ -109,18 +154,18 @@ const config: Config = {
|
||||
],
|
||||
},
|
||||
footer: {
|
||||
style: 'dark',
|
||||
style: "dark",
|
||||
links: [
|
||||
{
|
||||
title: 'Community',
|
||||
title: "Community",
|
||||
items: [
|
||||
{
|
||||
label: 'GitHub',
|
||||
href: 'https://github.com/blakeblackshear/frigate',
|
||||
label: "GitHub",
|
||||
href: "https://github.com/blakeblackshear/frigate",
|
||||
},
|
||||
{
|
||||
label: 'Discussions',
|
||||
href: 'https://github.com/blakeblackshear/frigate/discussions',
|
||||
label: "Discussions",
|
||||
href: "https://github.com/blakeblackshear/frigate/discussions",
|
||||
},
|
||||
],
|
||||
},
|
||||
@ -129,19 +174,19 @@ const config: Config = {
|
||||
},
|
||||
},
|
||||
plugins: [
|
||||
path.resolve(__dirname, 'plugins', 'raw-loader'),
|
||||
path.resolve(__dirname, "plugins", "raw-loader"),
|
||||
[
|
||||
'docusaurus-plugin-openapi-docs',
|
||||
"docusaurus-plugin-openapi-docs",
|
||||
{
|
||||
id: 'openapi',
|
||||
docsPluginId: 'classic', // configured for preset-classic
|
||||
id: "openapi",
|
||||
docsPluginId: "classic", // configured for preset-classic
|
||||
config: {
|
||||
frigateApi: {
|
||||
specPath: 'static/frigate-api.yaml',
|
||||
outputDir: 'docs/integrations/api',
|
||||
specPath: "static/frigate-api.yaml",
|
||||
outputDir: "docs/integrations/api",
|
||||
sidebarOptions: {
|
||||
groupPathsBy: 'tag',
|
||||
categoryLinkSource: 'tag',
|
||||
groupPathsBy: "tag",
|
||||
categoryLinkSource: "tag",
|
||||
sidebarCollapsible: true,
|
||||
sidebarCollapsed: true,
|
||||
},
|
||||
@ -149,23 +194,24 @@ const config: Config = {
|
||||
} satisfies OpenApiPlugin.Options,
|
||||
},
|
||||
},
|
||||
]
|
||||
],
|
||||
] as PluginConfig[],
|
||||
presets: [
|
||||
[
|
||||
'classic',
|
||||
"classic",
|
||||
{
|
||||
docs: {
|
||||
routeBasePath: '/',
|
||||
sidebarPath: './sidebars.ts',
|
||||
routeBasePath: "/",
|
||||
sidebarPath: "./sidebars.ts",
|
||||
// Please change this to your repo.
|
||||
editUrl: 'https://github.com/blakeblackshear/frigate/edit/master/docs/',
|
||||
editUrl:
|
||||
"https://github.com/blakeblackshear/frigate/edit/master/docs/",
|
||||
sidebarCollapsible: false,
|
||||
docItemComponent: '@theme/ApiItem', // Derived from docusaurus-theme-openapi
|
||||
docItemComponent: "@theme/ApiItem", // Derived from docusaurus-theme-openapi
|
||||
},
|
||||
|
||||
theme: {
|
||||
customCss: './src/css/custom.css',
|
||||
customCss: "./src/css/custom.css",
|
||||
},
|
||||
} satisfies Preset.Options,
|
||||
],
|
||||
|
||||
7
docs/package-lock.json
generated
7
docs/package-lock.json
generated
@ -12,6 +12,7 @@
|
||||
"@docusaurus/plugin-content-docs": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@inkeep/docusaurus": "^2.0.16",
|
||||
"@mdx-js/react": "^3.1.0",
|
||||
"clsx": "^2.1.1",
|
||||
"docusaurus-plugin-openapi-docs": "^4.3.1",
|
||||
@ -3954,6 +3955,12 @@
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/@inkeep/docusaurus": {
|
||||
"version": "2.0.16",
|
||||
"resolved": "https://registry.npmjs.org/@inkeep/docusaurus/-/docusaurus-2.0.16.tgz",
|
||||
"integrity": "sha512-dQhjlvFnl3CVr0gWeJ/V/qLnDy1XYrCfkdVSa2D3gJTxI9/vOf9639Y1aPxTxO88DiXuW9CertLrZLB6SoJ2yg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@isaacs/cliui": {
|
||||
"version": "8.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
|
||||
|
||||
@ -21,6 +21,7 @@
|
||||
"@docusaurus/plugin-content-docs": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@inkeep/docusaurus": "^2.0.16",
|
||||
"@mdx-js/react": "^3.1.0",
|
||||
"clsx": "^2.1.1",
|
||||
"docusaurus-plugin-openapi-docs": "^4.3.1",
|
||||
|
||||
@ -5,12 +5,13 @@ import frigateHttpApiSidebar from "./docs/integrations/api/sidebar";
|
||||
const sidebars: SidebarsConfig = {
|
||||
docs: {
|
||||
Frigate: [
|
||||
"frigate/index",
|
||||
"frigate/hardware",
|
||||
"frigate/installation",
|
||||
"frigate/camera_setup",
|
||||
"frigate/video_pipeline",
|
||||
"frigate/glossary",
|
||||
'frigate/index',
|
||||
'frigate/hardware',
|
||||
'frigate/installation',
|
||||
'frigate/updating',
|
||||
'frigate/camera_setup',
|
||||
'frigate/video_pipeline',
|
||||
'frigate/glossary',
|
||||
],
|
||||
Guides: [
|
||||
"guides/getting_started",
|
||||
@ -91,11 +92,11 @@ const sidebars: SidebarsConfig = {
|
||||
"configuration/metrics",
|
||||
"integrations/third_party_extensions",
|
||||
],
|
||||
"Frigate+": [
|
||||
"plus/index",
|
||||
"plus/first_model",
|
||||
"plus/improving_model",
|
||||
"plus/faq",
|
||||
'Frigate+': [
|
||||
'plus/index',
|
||||
'plus/annotating',
|
||||
'plus/first_model',
|
||||
'plus/faq',
|
||||
],
|
||||
Troubleshooting: [
|
||||
"troubleshooting/faqs",
|
||||
|
||||
230
docs/static/frigate-api.yaml
vendored
230
docs/static/frigate-api.yaml
vendored
@ -105,7 +105,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/users/{username}":
|
||||
/users/{username}:
|
||||
delete:
|
||||
tags:
|
||||
- Auth
|
||||
@ -130,7 +130,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/users/{username}/password":
|
||||
/users/{username}/password:
|
||||
put:
|
||||
tags:
|
||||
- Auth
|
||||
@ -161,7 +161,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/users/{username}/role":
|
||||
/users/{username}/role:
|
||||
put:
|
||||
tags:
|
||||
- Auth
|
||||
@ -228,7 +228,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/train/{name}/classify":
|
||||
/faces/train/{name}/classify:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -259,7 +259,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/create":
|
||||
/faces/{name}/create:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -284,7 +284,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/register":
|
||||
/faces/{name}/register:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -340,7 +340,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/delete":
|
||||
/faces/{name}/delete:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -371,6 +371,37 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/faces/{old_name}/rename:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
summary: Rename Face
|
||||
operationId: rename_face_faces__old_name__rename_put
|
||||
parameters:
|
||||
- name: old_name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Old Name
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/RenameFaceBody"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/lpr/reprocess:
|
||||
put:
|
||||
tags:
|
||||
@ -659,7 +690,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/review/event/{event_id}":
|
||||
/review/event/{event_id}:
|
||||
get:
|
||||
tags:
|
||||
- Review
|
||||
@ -685,7 +716,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/review/{review_id}":
|
||||
/review/{review_id}:
|
||||
get:
|
||||
tags:
|
||||
- Review
|
||||
@ -711,7 +742,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/review/{review_id}/viewed":
|
||||
/review/{review_id}/viewed:
|
||||
delete:
|
||||
tags:
|
||||
- Review
|
||||
@ -774,7 +805,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"/go2rtc/streams/{camera_name}":
|
||||
/go2rtc/streams/{camera_name}:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
@ -991,7 +1022,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"/logs/{service}":
|
||||
/logs/{service}:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
@ -1287,7 +1318,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/preview/{camera_name}/start/{start_ts}/end/{end_ts}":
|
||||
/preview/{camera_name}/start/{start_ts}/end/{end_ts}:
|
||||
get:
|
||||
tags:
|
||||
- Preview
|
||||
@ -1325,7 +1356,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/preview/{year_month}/{day}/{hour}/{camera_name}/{tz_name}":
|
||||
/preview/{year_month}/{day}/{hour}/{camera_name}/{tz_name}:
|
||||
get:
|
||||
tags:
|
||||
- Preview
|
||||
@ -1376,7 +1407,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/preview/{camera_name}/start/{start_ts}/end/{end_ts}/frames":
|
||||
/preview/{camera_name}/start/{start_ts}/end/{end_ts}/frames:
|
||||
get:
|
||||
tags:
|
||||
- Preview
|
||||
@ -1463,7 +1494,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"/export/{camera_name}/start/{start_time}/end/{end_time}":
|
||||
/export/{camera_name}/start/{start_time}/end/{end_time}:
|
||||
post:
|
||||
tags:
|
||||
- Export
|
||||
@ -1507,7 +1538,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/export/{event_id}/rename":
|
||||
/export/{event_id}/rename:
|
||||
patch:
|
||||
tags:
|
||||
- Export
|
||||
@ -1538,7 +1569,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/export/{event_id}":
|
||||
/export/{event_id}:
|
||||
delete:
|
||||
tags:
|
||||
- Export
|
||||
@ -1563,7 +1594,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/exports/{export_id}":
|
||||
/exports/{export_id}:
|
||||
get:
|
||||
tags:
|
||||
- Export
|
||||
@ -1699,7 +1730,7 @@ paths:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
default: "00:00,24:00"
|
||||
default: 00:00,24:00
|
||||
title: Time Range
|
||||
- name: has_clip
|
||||
in: query
|
||||
@ -2007,7 +2038,7 @@ paths:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
default: "00:00,24:00"
|
||||
default: 00:00,24:00
|
||||
title: Time Range
|
||||
- name: has_clip
|
||||
in: query
|
||||
@ -2147,7 +2178,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}":
|
||||
/events/{event_id}:
|
||||
get:
|
||||
tags:
|
||||
- Events
|
||||
@ -2198,7 +2229,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/retain":
|
||||
/events/{event_id}/retain:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -2249,7 +2280,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/plus":
|
||||
/events/{event_id}/plus:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -2280,7 +2311,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/false_positive":
|
||||
/events/{event_id}/false_positive:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
@ -2306,7 +2337,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/sub_label":
|
||||
/events/{event_id}/sub_label:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -2338,7 +2369,39 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/description":
|
||||
/events/{event_id}/recognized_license_plate:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Set Plate
|
||||
operationId: set_plate_events__event_id__recognized_license_plate_post
|
||||
parameters:
|
||||
- name: event_id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Event Id
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/EventsLPRBody"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/GenericResponse"
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/events/{event_id}/description:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -2370,7 +2433,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/description/regenerate":
|
||||
/events/{event_id}/description/regenerate:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
@ -2430,7 +2493,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{camera_name}/{label}/create":
|
||||
/events/{camera_name}/{label}/create:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
@ -2473,7 +2536,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/end":
|
||||
/events/{event_id}/end:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
@ -2505,7 +2568,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}":
|
||||
/{camera_name}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -2592,7 +2655,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/ptz/info":
|
||||
/{camera_name}/ptz/info:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -2617,7 +2680,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/latest.{extension}":
|
||||
/{camera_name}/latest.{extension}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -2720,7 +2783,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/recordings/{frame_time}/snapshot.{format}":
|
||||
/{camera_name}/recordings/{frame_time}/snapshot.{format}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -2767,7 +2830,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/plus/{frame_time}":
|
||||
/{camera_name}/plus/{frame_time}:
|
||||
post:
|
||||
tags:
|
||||
- Media
|
||||
@ -2846,7 +2909,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/recordings/summary":
|
||||
/{camera_name}/recordings/summary:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -2879,13 +2942,13 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/recordings":
|
||||
/{camera_name}/recordings:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Recordings
|
||||
description: >-
|
||||
Return specific camera recordings between the given 'after'/'end' times.
|
||||
Return specific camera recordings between the given "after"/"end" times.
|
||||
If not provided the last hour will be used
|
||||
operationId: recordings__camera_name__recordings_get
|
||||
parameters:
|
||||
@ -2900,14 +2963,14 @@ paths:
|
||||
required: false
|
||||
schema:
|
||||
type: number
|
||||
default: 1744227965.180043
|
||||
default: 1752611870.43948
|
||||
title: After
|
||||
- name: before
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
type: number
|
||||
default: 1744231565.180048
|
||||
default: 1752615470.43949
|
||||
title: Before
|
||||
responses:
|
||||
"200":
|
||||
@ -2921,13 +2984,14 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4":
|
||||
/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Recording Clip
|
||||
description: >-
|
||||
For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.
|
||||
For iOS devices, use the master.m3u8 HLS link instead of clip.mp4.
|
||||
Safari does not reliably process progressive mp4 files.
|
||||
operationId: recording_clip__camera_name__start__start_ts__end__end_ts__clip_mp4_get
|
||||
parameters:
|
||||
- name: camera_name
|
||||
@ -2960,11 +3024,14 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/vod/{camera_name}/start/{start_ts}/end/{end_ts}":
|
||||
/vod/{camera_name}/start/{start_ts}/end/{end_ts}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Vod Ts
|
||||
description: >-
|
||||
Returns an HLS playlist for the specified timestamp-range on the
|
||||
specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.
|
||||
operationId: vod_ts_vod__camera_name__start__start_ts__end__end_ts__get
|
||||
parameters:
|
||||
- name: camera_name
|
||||
@ -2997,12 +3064,14 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/vod/{year_month}/{day}/{hour}/{camera_name}":
|
||||
/vod/{year_month}/{day}/{hour}/{camera_name}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Vod Hour No Timezone
|
||||
description: VOD for specific hour. Uses the default timezone (UTC).
|
||||
description: >-
|
||||
Returns an HLS playlist for the specified date-time on the specified
|
||||
camera. Append /master.m3u8 or /index.m3u8 for HLS playback.
|
||||
operationId: vod_hour_no_timezone_vod__year_month___day___hour___camera_name__get
|
||||
parameters:
|
||||
- name: year_month
|
||||
@ -3041,11 +3110,15 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}":
|
||||
/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Vod Hour
|
||||
description: >-
|
||||
Returns an HLS playlist for the specified date-time (with timezone) on
|
||||
the specified camera. Append /master.m3u8 or /index.m3u8 for HLS
|
||||
playback.
|
||||
operationId: vod_hour_vod__year_month___day___hour___camera_name___tz_name__get
|
||||
parameters:
|
||||
- name: year_month
|
||||
@ -3090,11 +3163,14 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/vod/event/{event_id}":
|
||||
/vod/event/{event_id}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Vod Event
|
||||
description: >-
|
||||
Returns an HLS playlist for the specified object. Append /master.m3u8 or
|
||||
/index.m3u8 for HLS playback.
|
||||
operationId: vod_event_vod_event__event_id__get
|
||||
parameters:
|
||||
- name: event_id
|
||||
@ -3115,11 +3191,15 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/snapshot.jpg":
|
||||
/events/{event_id}/snapshot.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Event Snapshot
|
||||
description: >-
|
||||
Returns a snapshot image for the specified object id. NOTE: The query
|
||||
params only take affect while the event is in-progress. Once the event
|
||||
has ended the snapshot configuration is used.
|
||||
operationId: event_snapshot_events__event_id__snapshot_jpg_get
|
||||
parameters:
|
||||
- name: event_id
|
||||
@ -3190,7 +3270,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/thumbnail.{extension}":
|
||||
/events/{event_id}/thumbnail.{extension}:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3240,7 +3320,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/grid.jpg":
|
||||
/{camera_name}/grid.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3279,7 +3359,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/snapshot-clean.png":
|
||||
/events/{event_id}/snapshot-clean.png:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3311,7 +3391,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/clip.mp4":
|
||||
/events/{event_id}/clip.mp4:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3336,7 +3416,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/preview.gif":
|
||||
/events/{event_id}/preview.gif:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3361,7 +3441,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif":
|
||||
/{camera_name}/start/{start_ts}/end/{end_ts}/preview.gif:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3407,7 +3487,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4":
|
||||
/{camera_name}/start/{start_ts}/end/{end_ts}/preview.mp4:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3453,7 +3533,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/review/{event_id}/preview":
|
||||
/review/{event_id}/preview:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3488,7 +3568,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/preview/{file_name}/thumbnail.webp":
|
||||
/preview/{file_name}/thumbnail.webp:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3514,7 +3594,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/preview/{file_name}/thumbnail.jpg":
|
||||
/preview/{file_name}/thumbnail.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3540,7 +3620,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/{label}/thumbnail.jpg":
|
||||
/{camera_name}/{label}/thumbnail.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3571,7 +3651,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/{label}/best.jpg":
|
||||
/{camera_name}/{label}/best.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3602,7 +3682,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/{label}/clip.mp4":
|
||||
/{camera_name}/{label}/clip.mp4:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3633,7 +3713,7 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/{label}/snapshot.jpg":
|
||||
/{camera_name}/{label}/snapshot.jpg:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
@ -3985,6 +4065,23 @@ components:
|
||||
title: End Time
|
||||
type: object
|
||||
title: EventsEndBody
|
||||
EventsLPRBody:
|
||||
properties:
|
||||
recognizedLicensePlate:
|
||||
type: string
|
||||
maxLength: 100
|
||||
title: Recognized License Plate
|
||||
recognizedLicensePlateScore:
|
||||
anyOf:
|
||||
- type: number
|
||||
maximum: 1
|
||||
exclusiveMinimum: 0
|
||||
- type: "null"
|
||||
title: Score for recognized license plate
|
||||
type: object
|
||||
required:
|
||||
- recognizedLicensePlate
|
||||
title: EventsLPRBody
|
||||
EventsSubLabelBody:
|
||||
properties:
|
||||
subLabel:
|
||||
@ -4105,6 +4202,15 @@ components:
|
||||
- thumbnails
|
||||
- snapshot
|
||||
title: RegenerateDescriptionEnum
|
||||
RenameFaceBody:
|
||||
properties:
|
||||
new_name:
|
||||
type: string
|
||||
title: New Name
|
||||
type: object
|
||||
required:
|
||||
- new_name
|
||||
title: RenameFaceBody
|
||||
ReviewActivityMotionResponse:
|
||||
properties:
|
||||
start_time:
|
||||
|
||||
BIN
docs/static/img/plus/suggestions.webp
vendored
Normal file
BIN
docs/static/img/plus/suggestions.webp
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 71 KiB |
@ -33,6 +33,7 @@ from frigate.models import User
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=[Tags.auth])
|
||||
VALID_ROLES = ["admin", "viewer"]
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
@ -70,7 +71,7 @@ def get_remote_addr(request: Request):
|
||||
)
|
||||
if trusted_proxy.version == 4:
|
||||
ipv4 = ip.ipv4_mapped if ip.version == 6 else ip
|
||||
if ipv4 in trusted_proxy:
|
||||
if ipv4 is not None and ipv4 in trusted_proxy:
|
||||
trusted = True
|
||||
logger.debug(f"Trusted: {str(ip)} by {str(trusted_proxy)}")
|
||||
break
|
||||
@ -272,12 +273,13 @@ def auth(request: Request):
|
||||
else proxy_config.default_role
|
||||
)
|
||||
|
||||
# if comma-separated with "admin", use "admin", else use default role
|
||||
success_response.headers["remote-role"] = (
|
||||
"admin"
|
||||
if role
|
||||
and "admin" in [r.strip() for r in role.split(proxy_config.separator)]
|
||||
else proxy_config.default_role
|
||||
# if comma-separated with "admin", use "admin",
|
||||
# if comma-separated with "viewer", use "viewer",
|
||||
# else use default role
|
||||
|
||||
roles = [r.strip() for r in role.split(proxy_config.separator)] if role else []
|
||||
success_response.headers["remote-role"] = next(
|
||||
(r for r in VALID_ROLES if r in roles), proxy_config.default_role
|
||||
)
|
||||
|
||||
return success_response
|
||||
@ -402,7 +404,7 @@ def login(request: Request, body: AppPostLoginBody):
|
||||
password_hash = db_user.password_hash
|
||||
if verify_password(password, password_hash):
|
||||
role = getattr(db_user, "role", "viewer")
|
||||
if role not in ["admin", "viewer"]:
|
||||
if role not in VALID_ROLES:
|
||||
role = "viewer" # Enforce valid roles
|
||||
expiration = int(time.time()) + JWT_SESSION_LENGTH
|
||||
encoded_jwt = create_encoded_jwt(user, role, expiration, request.app.jwt_token)
|
||||
@ -432,7 +434,7 @@ def create_user(
|
||||
if not re.match("^[A-Za-z0-9._]+$", body.username):
|
||||
return JSONResponse(content={"message": "Invalid username"}, status_code=400)
|
||||
|
||||
role = body.role if body.role in ["admin", "viewer"] else "viewer"
|
||||
role = body.role if body.role in VALID_ROLES else "viewer"
|
||||
password_hash = hash_password(body.password, iterations=HASH_ITERATIONS)
|
||||
User.insert(
|
||||
{
|
||||
@ -503,7 +505,7 @@ async def update_role(
|
||||
return JSONResponse(
|
||||
content={"message": "Cannot modify admin user's role"}, status_code=403
|
||||
)
|
||||
if body.role not in ["admin", "viewer"]:
|
||||
if body.role not in VALID_ROLES:
|
||||
return JSONResponse(
|
||||
content={"message": "Role must be 'admin' or 'viewer'"}, status_code=400
|
||||
)
|
||||
|
||||
@ -800,7 +800,10 @@ def vod_event(event_id: str):
|
||||
)
|
||||
|
||||
|
||||
@router.get("/events/{event_id}/snapshot.jpg")
|
||||
@router.get(
|
||||
"/events/{event_id}/snapshot.jpg",
|
||||
description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
|
||||
)
|
||||
def event_snapshot(
|
||||
request: Request,
|
||||
event_id: str,
|
||||
|
||||
@ -58,9 +58,8 @@ async def review(
|
||||
)
|
||||
|
||||
clauses = [
|
||||
(ReviewSegment.start_time > after)
|
||||
& (ReviewSegment.start_time < before)
|
||||
& ((ReviewSegment.end_time.is_null(True)) | (ReviewSegment.end_time < before))
|
||||
(ReviewSegment.start_time < before)
|
||||
& ((ReviewSegment.end_time.is_null(True)) | (ReviewSegment.end_time > after))
|
||||
]
|
||||
|
||||
if cameras != "all":
|
||||
|
||||
@ -73,6 +73,7 @@ from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.builtin import empty_and_close_queue
|
||||
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
|
||||
from frigate.util.object import get_camera_regions_grid
|
||||
from frigate.util.services import set_file_limit
|
||||
from frigate.version import VERSION
|
||||
from frigate.video import capture_camera, track_camera
|
||||
from frigate.watchdog import FrigateWatchdog
|
||||
@ -632,6 +633,9 @@ class FrigateApp:
|
||||
# Ensure global state.
|
||||
self.ensure_dirs()
|
||||
|
||||
# Set soft file limits.
|
||||
set_file_limit()
|
||||
|
||||
# Start frigate services.
|
||||
self.init_camera_metrics()
|
||||
self.init_queues()
|
||||
|
||||
@ -426,7 +426,7 @@ class CameraState:
|
||||
current_thumb_frames = {
|
||||
obj.thumbnail_data["frame_time"]
|
||||
for obj in tracked_objects.values()
|
||||
if not obj.false_positive and obj.thumbnail_data is not None
|
||||
if obj.thumbnail_data is not None
|
||||
}
|
||||
current_best_frames = {
|
||||
obj.thumbnail_data["frame_time"] for obj in self.best_objects.values()
|
||||
@ -445,7 +445,7 @@ class CameraState:
|
||||
obj.thumbnail_data["frame_time"] if obj.thumbnail_data else None
|
||||
)
|
||||
logger.debug(
|
||||
f"{self.name}: Tracked object {obj_id} thumbnail frame_time: {thumb_time}"
|
||||
f"{self.name}: Tracked object {obj_id} thumbnail frame_time: {thumb_time}, false positive: {obj.false_positive}"
|
||||
)
|
||||
for t in thumb_frames_to_delete:
|
||||
object_id = self.frame_cache[t].get("object_id", "unknown")
|
||||
|
||||
@ -84,7 +84,7 @@ class FaceRecognitionConfig(FrigateBaseModel):
|
||||
default=1,
|
||||
gt=0,
|
||||
le=6,
|
||||
title="Min face attempts for the sub label to be applied to the person object.",
|
||||
title="Min face recognitions for the sub label to be applied to the person object.",
|
||||
)
|
||||
save_attempts: int = Field(
|
||||
default=100, ge=0, title="Number of face attempts to save in the train tab."
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from .base import FrigateBaseModel
|
||||
@ -11,8 +13,8 @@ class StatsConfig(FrigateBaseModel):
|
||||
network_bandwidth: bool = Field(
|
||||
default=False, title="Enable network bandwidth for ffmpeg processes."
|
||||
)
|
||||
sriov: bool = Field(
|
||||
default=False, title="Treat device as SR-IOV to support GPU stats."
|
||||
intel_gpu_device: Optional[str] = Field(
|
||||
default=None, title="Define the device to use when gathering SR-IOV stats."
|
||||
)
|
||||
|
||||
|
||||
|
||||
@ -26,6 +26,7 @@ DEFAULT_ATTRIBUTE_LABEL_MAP = {
|
||||
"car": [
|
||||
"amazon",
|
||||
"an_post",
|
||||
"canada_post",
|
||||
"dhl",
|
||||
"dpd",
|
||||
"fedex",
|
||||
@ -35,6 +36,7 @@ DEFAULT_ATTRIBUTE_LABEL_MAP = {
|
||||
"postnl",
|
||||
"postnord",
|
||||
"purolator",
|
||||
"royal_mail",
|
||||
"ups",
|
||||
"usps",
|
||||
],
|
||||
|
||||
@ -109,7 +109,13 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
|
||||
obj_data["box"][1],
|
||||
obj_data["box"][2],
|
||||
obj_data["box"][3],
|
||||
224,
|
||||
int(
|
||||
max(
|
||||
obj_data["box"][1] - obj_data["box"][0],
|
||||
obj_data["box"][3] - obj_data["box"][2],
|
||||
)
|
||||
* 1.1
|
||||
),
|
||||
1.0,
|
||||
)
|
||||
|
||||
|
||||
@ -303,9 +303,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
self.person_face_history[id]
|
||||
)
|
||||
|
||||
if len(self.person_face_history[id]) < self.face_config.min_faces:
|
||||
weighted_sub_label = "unknown"
|
||||
|
||||
self.requestor.send_data(
|
||||
"tracked_object_update",
|
||||
json.dumps(
|
||||
@ -489,6 +486,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
best_name = max(weighted_scores, key=weighted_scores.get)
|
||||
|
||||
# If the number of faces for this person < min_faces, we are not confident it is a correct result
|
||||
if counts[best_name] < self.face_config.min_faces:
|
||||
return None, 0.0
|
||||
|
||||
# If the best name has the same number of results as another name, we are not confident it is a correct result
|
||||
for name, count in counts.items():
|
||||
if name != best_name and counts[best_name] == count:
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import ctypes
|
||||
import logging
|
||||
import platform
|
||||
|
||||
import numpy as np
|
||||
|
||||
@ -219,6 +220,14 @@ class TensorRtDetector(DetectionApi):
|
||||
]
|
||||
|
||||
def __init__(self, detector_config: TensorRTDetectorConfig):
|
||||
if platform.machine() == "x86_64":
|
||||
logger.error(
|
||||
"TensorRT detector is no longer supported on amd64 system. Please use ONNX detector instead, see https://docs.frigate.video/configuration/object_detectors#onnx for more information."
|
||||
)
|
||||
raise ImportError(
|
||||
"TensorRT detector is no longer supported on amd64 system. Please use ONNX detector instead, see https://docs.frigate.video/configuration/object_detectors#onnx for more information."
|
||||
)
|
||||
|
||||
assert TRT_SUPPORT, (
|
||||
f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
|
||||
)
|
||||
|
||||
@ -1,12 +1,14 @@
|
||||
"""SQLite-vec embeddings database."""
|
||||
|
||||
import datetime
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
|
||||
from numpy import ndarray
|
||||
from PIL import Image
|
||||
from playhouse.shortcuts import model_to_dict
|
||||
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
@ -199,14 +201,31 @@ class Embeddings:
|
||||
@param: upsert If embedding should be upserted into vec DB
|
||||
"""
|
||||
start = datetime.datetime.now().timestamp()
|
||||
ids = list(event_thumbs.keys())
|
||||
embeddings = self.vision_embedding(list(event_thumbs.values()))
|
||||
valid_ids = []
|
||||
valid_thumbs = []
|
||||
for eid, thumb in event_thumbs.items():
|
||||
try:
|
||||
img = Image.open(io.BytesIO(thumb))
|
||||
img.verify() # Will raise if corrupt
|
||||
valid_ids.append(eid)
|
||||
valid_thumbs.append(thumb)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Embeddings reindexing: Skipping corrupt thumbnail for event {eid}: {e}"
|
||||
)
|
||||
|
||||
if not valid_thumbs:
|
||||
logger.warning(
|
||||
"Embeddings reindexing: No valid thumbnails to embed in this batch."
|
||||
)
|
||||
return []
|
||||
|
||||
embeddings = self.vision_embedding(valid_thumbs)
|
||||
|
||||
if upsert:
|
||||
items = []
|
||||
|
||||
for i in range(len(ids)):
|
||||
items.append(ids[i])
|
||||
for i in range(len(valid_ids)):
|
||||
items.append(valid_ids[i])
|
||||
items.append(serialize(embeddings[i]))
|
||||
self.image_eps.update()
|
||||
|
||||
@ -214,12 +233,12 @@ class Embeddings:
|
||||
"""
|
||||
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
|
||||
VALUES {}
|
||||
""".format(", ".join(["(?, ?)"] * len(ids))),
|
||||
""".format(", ".join(["(?, ?)"] * len(valid_ids))),
|
||||
items,
|
||||
)
|
||||
|
||||
duration = datetime.datetime.now().timestamp() - start
|
||||
self.text_inference_speed.update(duration / len(ids))
|
||||
self.text_inference_speed.update(duration / len(valid_ids))
|
||||
|
||||
return embeddings
|
||||
|
||||
@ -315,27 +334,24 @@ class Embeddings:
|
||||
.paginate(current_page, batch_size)
|
||||
)
|
||||
|
||||
while len(events) > 0:
|
||||
while events:
|
||||
event: Event
|
||||
batch_thumbs = {}
|
||||
batch_descs = {}
|
||||
for event in events:
|
||||
thumbnail = get_event_thumbnail_bytes(event)
|
||||
|
||||
if thumbnail is None:
|
||||
continue
|
||||
|
||||
batch_thumbs[event.id] = thumbnail
|
||||
totals["thumbnails"] += 1
|
||||
totals["processed_objects"] += 1
|
||||
|
||||
if description := event.data.get("description", "").strip():
|
||||
batch_descs[event.id] = description
|
||||
totals["descriptions"] += 1
|
||||
|
||||
totals["processed_objects"] += 1
|
||||
if thumbnail := get_event_thumbnail_bytes(event):
|
||||
batch_thumbs[event.id] = thumbnail
|
||||
totals["thumbnails"] += 1
|
||||
|
||||
# run batch embedding
|
||||
self.batch_embed_thumbnail(batch_thumbs)
|
||||
if batch_thumbs:
|
||||
self.batch_embed_thumbnail(batch_thumbs)
|
||||
|
||||
if batch_descs:
|
||||
self.batch_embed_description(batch_descs)
|
||||
|
||||
@ -491,6 +491,6 @@ def parse_preset_output_record(arg: Any, force_record_hvc1: bool) -> list[str]:
|
||||
|
||||
if force_record_hvc1:
|
||||
# Apple only supports HEVC if it is hvc1 (vs. hev1)
|
||||
preset += FFMPEG_HVC1_ARGS
|
||||
return preset + FFMPEG_HVC1_ARGS
|
||||
|
||||
return preset
|
||||
|
||||
@ -265,9 +265,15 @@ class OnvifController:
|
||||
"RelativeZoomTranslationSpace"
|
||||
][zoom_space_id]["URI"]
|
||||
else:
|
||||
if "Zoom" in move_request["Translation"]:
|
||||
if (
|
||||
move_request["Translation"] is not None
|
||||
and "Zoom" in move_request["Translation"]
|
||||
):
|
||||
del move_request["Translation"]["Zoom"]
|
||||
if "Zoom" in move_request["Speed"]:
|
||||
if (
|
||||
move_request["Speed"] is not None
|
||||
and "Zoom" in move_request["Speed"]
|
||||
):
|
||||
del move_request["Speed"]["Zoom"]
|
||||
logger.debug(
|
||||
f"{camera_name}: Relative move request after deleting zoom: {move_request}"
|
||||
|
||||
@ -201,7 +201,7 @@ async def set_gpu_stats(
|
||||
continue
|
||||
|
||||
# intel QSV GPU
|
||||
intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)
|
||||
intel_usage = get_intel_gpu_stats(config.telemetry.stats.intel_gpu_device)
|
||||
|
||||
if intel_usage is not None:
|
||||
stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""}
|
||||
@ -226,7 +226,9 @@ async def set_gpu_stats(
|
||||
continue
|
||||
|
||||
# intel VAAPI GPU
|
||||
intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)
|
||||
intel_usage = get_intel_gpu_stats(
|
||||
config.telemetry.stats.intel_gpu_device
|
||||
)
|
||||
|
||||
if intel_usage is not None:
|
||||
stats["intel-vaapi"] = intel_usage or {"gpu": "", "mem": ""}
|
||||
|
||||
@ -48,8 +48,9 @@ class TestHttpReview(BaseTestHttp):
|
||||
################################### GET /review Endpoint ########################################################
|
||||
####################################################################################################################
|
||||
|
||||
# Does not return any data point since the end time (before parameter) is not passed and the review segment end_time is 2 seconds from now
|
||||
def test_get_review_no_filters_no_matches(self):
|
||||
def test_get_review_that_overlaps_default_period(self):
|
||||
"""Test that a review item that starts during the default period
|
||||
but ends after is included in the results."""
|
||||
now = datetime.now().timestamp()
|
||||
|
||||
with TestClient(self.app) as client:
|
||||
@ -57,7 +58,7 @@ class TestHttpReview(BaseTestHttp):
|
||||
response = client.get("/review")
|
||||
assert response.status_code == 200
|
||||
response_json = response.json()
|
||||
assert len(response_json) == 0
|
||||
assert len(response_json) == 1
|
||||
|
||||
def test_get_review_no_filters(self):
|
||||
now = datetime.now().timestamp()
|
||||
@ -73,11 +74,13 @@ class TestHttpReview(BaseTestHttp):
|
||||
assert response_json[0]["has_been_reviewed"] == False
|
||||
|
||||
def test_get_review_with_time_filter_no_matches(self):
|
||||
"""Test that review items outside the range are not returned."""
|
||||
now = datetime.now().timestamp()
|
||||
|
||||
with TestClient(self.app) as client:
|
||||
id = "123456.random"
|
||||
super().insert_mock_review_segment(id, now, now + 2)
|
||||
super().insert_mock_review_segment(id, now - 2, now - 1)
|
||||
super().insert_mock_review_segment(f"{id}2", now + 4, now + 5)
|
||||
params = {
|
||||
"after": now,
|
||||
"before": now + 3,
|
||||
|
||||
@ -5,6 +5,7 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import resource
|
||||
import signal
|
||||
import subprocess as sp
|
||||
import traceback
|
||||
@ -256,7 +257,7 @@ def get_amd_gpu_stats() -> Optional[dict[str, str]]:
|
||||
return results
|
||||
|
||||
|
||||
def get_intel_gpu_stats(sriov: bool) -> Optional[dict[str, str]]:
|
||||
def get_intel_gpu_stats(intel_gpu_device: Optional[str]) -> Optional[dict[str, str]]:
|
||||
"""Get stats using intel_gpu_top."""
|
||||
|
||||
def get_stats_manually(output: str) -> dict[str, str]:
|
||||
@ -303,8 +304,8 @@ def get_intel_gpu_stats(sriov: bool) -> Optional[dict[str, str]]:
|
||||
"1",
|
||||
]
|
||||
|
||||
if sriov:
|
||||
intel_gpu_top_command += ["-d", "sriov"]
|
||||
if intel_gpu_device:
|
||||
intel_gpu_top_command += ["-d", intel_gpu_device]
|
||||
|
||||
try:
|
||||
p = sp.run(
|
||||
@ -751,3 +752,19 @@ def process_logs(
|
||||
log_lines.append(dedup_message)
|
||||
|
||||
return len(log_lines), log_lines[start:end]
|
||||
|
||||
|
||||
def set_file_limit() -> None:
|
||||
# Newer versions of containerd 2.X+ impose a very low soft file limit of 1024
|
||||
# This applies to OSs like HA OS (see https://github.com/home-assistant/operating-system/issues/4110)
|
||||
# Attempt to increase this limit
|
||||
soft_limit = int(os.getenv("SOFT_FILE_LIMIT", "65536") or "65536")
|
||||
|
||||
current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
logger.debug(f"Current file limits - Soft: {current_soft}, Hard: {current_hard}")
|
||||
|
||||
new_soft = min(soft_limit, current_hard)
|
||||
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, current_hard))
|
||||
logger.debug(
|
||||
f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}"
|
||||
)
|
||||
|
||||
@ -59,6 +59,11 @@ def create_ground_plane(zone_points, distances):
|
||||
:param y: Y-coordinate in the image
|
||||
:return: Real-world distance per pixel at the given (x, y) coordinate
|
||||
"""
|
||||
|
||||
# Return 0 if divide by zero would occur
|
||||
if (B[0] - A[0]) == 0 or (D[1] - A[1]) == 0:
|
||||
return 0
|
||||
|
||||
# Normalize x and y within the zone
|
||||
x_norm = (x - A[0]) / (B[0] - A[0])
|
||||
y_norm = (y - A[1]) / (D[1] - A[1])
|
||||
|
||||
@ -208,7 +208,35 @@ class CameraWatchdog(threading.Thread):
|
||||
|
||||
return self.config.enabled
|
||||
|
||||
def run(self):
|
||||
def reset_capture_thread(
|
||||
self, terminate: bool = True, drain_output: bool = True
|
||||
) -> None:
|
||||
if terminate:
|
||||
self.ffmpeg_detect_process.terminate()
|
||||
try:
|
||||
self.logger.info("Waiting for ffmpeg to exit gracefully...")
|
||||
|
||||
if drain_output:
|
||||
self.ffmpeg_detect_process.communicate(timeout=30)
|
||||
else:
|
||||
self.ffmpeg_detect_process.wait(timeout=30)
|
||||
except sp.TimeoutExpired:
|
||||
self.logger.info("FFmpeg did not exit. Force killing...")
|
||||
self.ffmpeg_detect_process.kill()
|
||||
|
||||
if drain_output:
|
||||
self.ffmpeg_detect_process.communicate()
|
||||
else:
|
||||
self.ffmpeg_detect_process.wait()
|
||||
|
||||
self.logger.error(
|
||||
"The following ffmpeg logs include the last 100 lines prior to exit."
|
||||
)
|
||||
self.logpipe.dump()
|
||||
self.logger.info("Restarting ffmpeg...")
|
||||
self.start_ffmpeg_detect()
|
||||
|
||||
def run(self) -> None:
|
||||
if self._update_enabled_state():
|
||||
self.start_all_ffmpeg()
|
||||
|
||||
@ -235,24 +263,7 @@ class CameraWatchdog(threading.Thread):
|
||||
self.logger.error(
|
||||
f"Ffmpeg process crashed unexpectedly for {self.camera_name}."
|
||||
)
|
||||
self.logger.error(
|
||||
"The following ffmpeg logs include the last 100 lines prior to exit."
|
||||
)
|
||||
self.logpipe.dump()
|
||||
self.start_ffmpeg_detect()
|
||||
elif now - self.capture_thread.current_frame.value > 20:
|
||||
self.camera_fps.value = 0
|
||||
self.logger.info(
|
||||
f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
|
||||
)
|
||||
self.ffmpeg_detect_process.terminate()
|
||||
try:
|
||||
self.logger.info("Waiting for ffmpeg to exit gracefully...")
|
||||
self.ffmpeg_detect_process.communicate(timeout=30)
|
||||
except sp.TimeoutExpired:
|
||||
self.logger.info("FFmpeg did not exit. Force killing...")
|
||||
self.ffmpeg_detect_process.kill()
|
||||
self.ffmpeg_detect_process.communicate()
|
||||
self.reset_capture_thread(terminate=False)
|
||||
elif self.camera_fps.value >= (self.config.detect.fps + 10):
|
||||
self.fps_overflow_count += 1
|
||||
|
||||
@ -262,14 +273,13 @@ class CameraWatchdog(threading.Thread):
|
||||
self.logger.info(
|
||||
f"{self.camera_name} exceeded fps limit. Exiting ffmpeg..."
|
||||
)
|
||||
self.ffmpeg_detect_process.terminate()
|
||||
try:
|
||||
self.logger.info("Waiting for ffmpeg to exit gracefully...")
|
||||
self.ffmpeg_detect_process.communicate(timeout=30)
|
||||
except sp.TimeoutExpired:
|
||||
self.logger.info("FFmpeg did not exit. Force killing...")
|
||||
self.ffmpeg_detect_process.kill()
|
||||
self.ffmpeg_detect_process.communicate()
|
||||
self.reset_capture_thread(drain_output=False)
|
||||
elif now - self.capture_thread.current_frame.value > 20:
|
||||
self.camera_fps.value = 0
|
||||
self.logger.info(
|
||||
f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
|
||||
)
|
||||
self.reset_capture_thread()
|
||||
else:
|
||||
# process is running normally
|
||||
self.fps_overflow_count = 0
|
||||
|
||||
@ -2,6 +2,9 @@
|
||||
"form": {
|
||||
"password": "كلمة السر",
|
||||
"user": "أسم المستخدم",
|
||||
"login": "تسجيل الدخول"
|
||||
"login": "تسجيل الدخول",
|
||||
"errors": {
|
||||
"usernameRequired": "اسم المستخدم مطلوب"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,5 +1,8 @@
|
||||
{
|
||||
"noRecordingsFoundForThisTime": "لا يوجد تسجيلات في هذا التوقيت",
|
||||
"noPreviewFound": "لا يوجد معاينة",
|
||||
"noPreviewFoundFor": "لا يوجد معاينة لـ{{cameraName}}"
|
||||
"noPreviewFoundFor": "لا يوجد معاينة لـ{{cameraName}}",
|
||||
"submitFrigatePlus": {
|
||||
"title": "هل ترغب بإرسال هذه الصوره الى Frigate+؟"
|
||||
}
|
||||
}
|
||||
|
||||
@ -2,6 +2,7 @@
|
||||
"detections": "الإكتشافات",
|
||||
"alerts": "الإنذارات",
|
||||
"motion": {
|
||||
"label": "الحركة"
|
||||
"label": "الحركة",
|
||||
"only": "حركة فقط"
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,5 +1,8 @@
|
||||
{
|
||||
"exploreMore": "اكتشف المزيد من أجسام {{label}}",
|
||||
"documentTitle": "اكتشف - فرايجيت",
|
||||
"generativeAI": "ذكاء اصطناعي مولد"
|
||||
"generativeAI": "ذكاء اصطناعي مولد",
|
||||
"exploreIsUnavailable": {
|
||||
"title": "المتصفح غير متاح"
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,5 +1,8 @@
|
||||
{
|
||||
"documentTitle": "بث حي - فرايجيت",
|
||||
"documentTitle.withCamera": "{{camera}} - بث حي - فرايجيت",
|
||||
"lowBandwidthMode": "وضع موفر للبيانات"
|
||||
"lowBandwidthMode": "وضع موفر للبيانات",
|
||||
"twoWayTalk": {
|
||||
"enable": "تفعيل المكالمات ثنائية الاتجاه"
|
||||
}
|
||||
}
|
||||
|
||||
@ -164,7 +164,7 @@
|
||||
"rimshot": "Rimshot",
|
||||
"drum_roll": "Rul·lat de tambor",
|
||||
"bass_drum": "Bombo",
|
||||
"timpani": "Timpani",
|
||||
"timpani": "Timpà",
|
||||
"tabla": "Tabla",
|
||||
"cymbal": "Plat",
|
||||
"hi_hat": "Charles",
|
||||
|
||||
@ -96,7 +96,8 @@
|
||||
"toast": {
|
||||
"success": {
|
||||
"updatedSublabel": "Subetiqueta actualitzada amb èxit.",
|
||||
"updatedLPR": "Matrícula actualitzada amb èxit."
|
||||
"updatedLPR": "Matrícula actualitzada amb èxit.",
|
||||
"regenerate": "El {{provider}} ha sol·licitat una nova descripció. En funció de la velocitat del vostre proveïdor, la nova descripció pot trigar un temps a regenerar-se."
|
||||
},
|
||||
"error": {
|
||||
"regenerate": "No s'ha pogut contactar amb {{provider}} per obtenir una nova descripció: {{errorMessage}}",
|
||||
@ -107,12 +108,16 @@
|
||||
"title": "Revisar detalls de l'element",
|
||||
"desc": "Revisar detalls de l'element",
|
||||
"tips": {
|
||||
"hasMissingObjects": "Ajusta la configuració si vols que Frigate guardi els objectes rastrejat de les seguents etiquetes: <em>{{objects}}</em>"
|
||||
"hasMissingObjects": "Ajusta la configuració si vols que Frigate guardi els objectes rastrejat de les seguents etiquetes: <em>{{objects}}</em>",
|
||||
"mismatch_one": "{{count}} objecte no disponible ha estat detectat i inclòs en aquest element de revisió. Aquest objecte tampoc no s'han calificat com una alerta o detecció o ja ha estat netejat mes amunt/eliminat.",
|
||||
"mismatch_many": "{{count}} objectes no disponibles han estat detectats i inclosos en aquest element de revisió. Aquests objectes tampoc no s'han calificat com una alerta o detecció o ja han estat netejats mes amunt/eliminats.",
|
||||
"mismatch_other": "{{count}} objectes no disponibles han estat detectats i inclosos en aquest element de revisió. Aquests objectes tampoc no s'han calificat com una alerta o detecció o ja han estat netejats mes amunt/eliminats."
|
||||
}
|
||||
},
|
||||
"label": "Etiqueta",
|
||||
"topScore": {
|
||||
"label": "Puntuació màxima"
|
||||
"label": "Puntuació màxima",
|
||||
"info": "El resultat superior és la mediana més alta per l'objecte seguit, així que pot diferir des del resultat mostrat en thumbnail de la búsqueda de recerca."
|
||||
},
|
||||
"estimatedSpeed": "Velocitat estimada",
|
||||
"button": {
|
||||
@ -193,7 +198,8 @@
|
||||
"noTrackedObjects": "No s'han trobat objectes rastrejats",
|
||||
"dialog": {
|
||||
"confirmDelete": {
|
||||
"title": "Confirmar la supressió"
|
||||
"title": "Confirmar la supressió",
|
||||
"desc": "Eliminant aquest objecte seguit borrarà l'snapshot, qualsevol embedding gravat, i qualsevol objecte associat. Les imatges gravades d'aquest objecte seguit en l'historial <em>NO</em> seràn eliminades.<br /><br />Estas segur que vols continuar?"
|
||||
}
|
||||
},
|
||||
"fetchingTrackedObjectsFailed": "Error al obtenir objectes rastrejats: {{errorMessage}}",
|
||||
|
||||
@ -145,7 +145,8 @@
|
||||
"all": "Tot",
|
||||
"motion": "Moviment",
|
||||
"active_objects": "Objectes actius"
|
||||
}
|
||||
},
|
||||
"notAllTips": "El vostre {{source}} registre de configuració de retenció s'ha posat en el mode <code>: {{effectiveRetainMode}}</code>, així que la gravaciò a demanda només seguirà segments amb {{effectiveRetainModeName}}."
|
||||
},
|
||||
"editLayout": {
|
||||
"label": "Editar el disseny",
|
||||
|
||||
@ -148,23 +148,27 @@
|
||||
"desc": "Habilita l'estimació de velocitat per a objectes dins d'aquesta zona. La zona ha de tenir exactament 4 punts."
|
||||
},
|
||||
"inertia": {
|
||||
"title": "Inèrcia"
|
||||
"title": "Inèrcia",
|
||||
"desc": "Especifica quants fotogrames ha d’estar un objecte dins d’una zona abans de considerar-se que hi és. <em>Per defecte: 3</em>"
|
||||
},
|
||||
"point_one": "{{count}} punt",
|
||||
"point_many": "{{count}} punts",
|
||||
"point_other": "{{count}} punts",
|
||||
"name": {
|
||||
"inputPlaceHolder": "Introduïu un nom…",
|
||||
"title": "Nom"
|
||||
"title": "Nom",
|
||||
"tips": "El nom ha de tenir almenys 2 caràcters i no pot coincidir amb el nom d'una càmera ni amb el d'una altra zona."
|
||||
},
|
||||
"label": "Zones",
|
||||
"desc": {
|
||||
"documentation": "Documentació"
|
||||
"documentation": "Documentació",
|
||||
"title": "Les zones permeten definir una àrea específica de la imatge per tal de determinar si un objecte es troba dins d'una àrea concreta o no."
|
||||
},
|
||||
"add": "Afegir Zona",
|
||||
"edit": "Editar zona",
|
||||
"loiteringTime": {
|
||||
"title": "Temps de merodeig"
|
||||
"title": "Temps de merodeig",
|
||||
"desc": "Estableix el temps mínim, en segons, que l'objecte ha d'estar dins la zona perquè s'activi. <em>Per defecte: 0</em>"
|
||||
},
|
||||
"allObjects": "Tots els objectes",
|
||||
"documentTitle": "Edita zona - Frigate",
|
||||
@ -188,10 +192,12 @@
|
||||
},
|
||||
"motionMasks": {
|
||||
"desc": {
|
||||
"documentation": "Documentació"
|
||||
"documentation": "Documentació",
|
||||
"title": "Les màscares de moviment s’utilitzen per evitar que certs tipus de moviment no desitjats activin la detecció. Si s’aplica una màscara excessiva, es dificultarà el seguiment dels objectes."
|
||||
},
|
||||
"context": {
|
||||
"documentation": "Llegir la documentació"
|
||||
"documentation": "Llegir la documentació",
|
||||
"title": "Les màscares de moviment s’utilitzen per evitar que certs tipus de moviment no desitjats activin la detecció (per exemple: branques d’arbres, marques temporals). Les màscares de moviment s’han d’utilitzar <em>amb molta moderació</em>, un excés de màscares dificultarà el seguiment dels objectes."
|
||||
},
|
||||
"polygonAreaTooLarge": {
|
||||
"documentation": "Llegir la documentació",
|
||||
@ -199,7 +205,7 @@
|
||||
"title": "La màscara de moviment cobreix el {{polygonArea}}% del camp de visió de la càmera. Les màscares de moviment molt grans no son recomanables."
|
||||
},
|
||||
"point_one": "{{count}} punt",
|
||||
"point_many": "",
|
||||
"point_many": "{{count}} punts",
|
||||
"point_other": "{{count}} punts",
|
||||
"label": "Màscara de moviment",
|
||||
"add": "Nova màscara de moviment",
|
||||
@ -254,11 +260,13 @@
|
||||
"notification": {
|
||||
"email": {
|
||||
"title": "Correu electrònic",
|
||||
"placeholder": "p. ex. exemple@email.com"
|
||||
"placeholder": "p. ex. exemple@email.com",
|
||||
"desc": "Es requereix un correu electrònic vàlid que s’utilitzarà per notificar-te si hi ha algun problema amb el servei de notificacions push."
|
||||
},
|
||||
"notificationSettings": {
|
||||
"documentation": "Llegir la documentació",
|
||||
"title": "Paràmetres de notificació"
|
||||
"title": "Paràmetres de notificació",
|
||||
"desc": "Frigate pot enviar notificacions push directament al teu dispositiu quan s’executa des del navegador o està instal·lat com a PWA (aplicació web progressiva)."
|
||||
},
|
||||
"deviceSpecific": "Paràmetres específics del dispositiu",
|
||||
"registerDevice": "Registrar aquest dispositiu",
|
||||
@ -276,7 +284,8 @@
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"settingSaved": "Els paràmetres de notificacions s'han desat."
|
||||
"settingSaved": "Els paràmetres de notificacions s'han desat.",
|
||||
"registered": "Registre de notificacions realitzat amb èxit. Cal reiniciar Frigate perquè es puguin enviar notificacions (inclosa una notificació de prova)."
|
||||
},
|
||||
"error": {
|
||||
"registerFailed": "No s'ha pogut desar el registre de notificacions."
|
||||
@ -290,7 +299,8 @@
|
||||
"title": "Notificacions",
|
||||
"notificationUnavailable": {
|
||||
"title": "Notificacions no disponibles",
|
||||
"documentation": "Llegir la documentació"
|
||||
"documentation": "Llegir la documentació",
|
||||
"desc": "Les notificacions push web requereixen un context segur (<code>https://…</code>). Aquesta és una limitació del navegador. Accedeix a Frigate de manera segura per utilitzar les notificacions."
|
||||
},
|
||||
"unsavedChanges": "Canvis de notificació no desats",
|
||||
"globalSettings": {
|
||||
@ -304,7 +314,8 @@
|
||||
},
|
||||
"camera": {
|
||||
"streams": {
|
||||
"title": "Transmissions"
|
||||
"title": "Transmissions",
|
||||
"desc": "Desactiva temporalment una càmera fins que es reiniciï Frigate. La desactivació d'una càmera atura completament el processament de les transmissions d'aquesta càmera per part de Frigate. La detecció, gravació i depuració no estaran disponibles.<br /><em>Nota: Això no desactiva les retransmissions de go2rtc.</em>"
|
||||
},
|
||||
"title": "Paràmetres de la càmera",
|
||||
"reviewClassification": {
|
||||
@ -321,25 +332,32 @@
|
||||
"success": "S'ha desat la configuració de la classificació de revisió. Reinicia Frigate per aplicar els canvis."
|
||||
},
|
||||
"zoneObjectDetectionsTips": {
|
||||
"text": "Tots els objectes {{detectionsLabels}} no classificats a la {{zone}} de {{cameraName}} es mostraràn com a Deteccions."
|
||||
}
|
||||
"text": "Tots els objectes {{detectionsLabels}} no classificats a la {{zone}} de {{cameraName}} es mostraràn com a Deteccions.",
|
||||
"notSelectDetections": "Tots els objectes {{detectionsLabels}} detectats a {{zone}} de la càmera {{cameraName}} que no estiguin categoritzats com a Alertes es mostraran com a Deteccions, independentment de la zona en què es trobin.",
|
||||
"regardlessOfZoneObjectDetectionsTips": "Tots els objectes {{detectionsLabels}} no categoritzats a {{cameraName}} es mostraran com a Deteccions independentment de la zona en què es trobin."
|
||||
},
|
||||
"objectDetectionsTips": "Tots els objectes {{detectionsLabels}} no categoritzats a {{cameraName}} es mostraran com a Deteccions independentment de la zona en què es trobin.",
|
||||
"desc": "Frigate categoritza els elements de revisió com a Alertes i Deteccions. Per defecte, tots els objectes de tipus <em>persona</em> i <em>cotxe</em> es consideren Alertes. Pots afinar la categorització dels teus elements de revisió configurant zones requerides per a aquests."
|
||||
},
|
||||
"review": {
|
||||
"alerts": "Alertes ",
|
||||
"detections": "Deteccions ",
|
||||
"title": "Revisar"
|
||||
"title": "Revisar",
|
||||
"desc": "Habilita o deshabilita temporalment les alertes i deteccions per a aquesta càmera fins que es reiniciï Frigate. Quan estigui desactivat, no es generaran nous elements de revisió. "
|
||||
}
|
||||
},
|
||||
"motionDetectionTuner": {
|
||||
"Threshold": {
|
||||
"title": "Llindar"
|
||||
"title": "Llindar",
|
||||
"desc": "El valor del llindar determina quanta variació en la luminància d’un píxel cal perquè es consideri moviment. <em>Per defecte: 30</em>"
|
||||
},
|
||||
"contourArea": {
|
||||
"title": "Àrea de contorn",
|
||||
"desc": "El valor de l’àrea del contorn s’utilitza per decidir quins grups de píxels canviats es consideren moviment. <em>Valor per defecte: 10</em>"
|
||||
},
|
||||
"desc": {
|
||||
"documentation": "Llegeix la guia d'ajust de detecció de moviment"
|
||||
"documentation": "Llegeix la guia d'ajust de detecció de moviment",
|
||||
"title": "Frigate utilitza la detecció de moviment com a primer filtre per comprovar si hi ha alguna activitat a la imatge que valgui la pena analitzar amb la detecció d’objectes."
|
||||
},
|
||||
"improveContrast": {
|
||||
"title": "Millorar contrast",
|
||||
@ -362,7 +380,8 @@
|
||||
},
|
||||
"regions": {
|
||||
"title": "Regions",
|
||||
"desc": "Mostre un requadre de la regió d'interés enviat al detector d'objectes"
|
||||
"desc": "Mostre un requadre de la regió d'interés enviat al detector d'objectes",
|
||||
"tips": "<p><strong>Requadres de Regió</strong></p><br><p>Requadres verds es sobreposaran a les àrees d’interès de la imatge que s'envien al detector d’objectes.</p>"
|
||||
},
|
||||
"objectShapeFilterDrawing": {
|
||||
"score": "Puntuació",
|
||||
@ -370,7 +389,8 @@
|
||||
"ratio": "Proporció",
|
||||
"area": "Àrea",
|
||||
"title": "Dibuix del filtre de forma de l'objecte",
|
||||
"desc": "Dibuixa un rectangle a la imatge per veure detalls d'àrea i proporció"
|
||||
"desc": "Dibuixa un rectangle a la imatge per veure detalls d'àrea i proporció",
|
||||
"tips": "Habilita aquesta opció per dibuixar un rectangle a la imatge de la càmera que mostri la seva àrea i proporció. Aquests valors es poden utilitzar després per configurar els paràmetres del filtre de forma d’objecte a la teva configuració."
|
||||
},
|
||||
"zones": {
|
||||
"title": "Zones",
|
||||
@ -383,7 +403,8 @@
|
||||
"boundingBoxes": {
|
||||
"title": "Caixes delimitadores",
|
||||
"colors": {
|
||||
"label": "Colors de la caixa delimitadora de l'objecte"
|
||||
"label": "Colors de la caixa delimitadora de l'objecte",
|
||||
"info": "<li>En iniciar, s'assignarà un color diferent a cada etiqueta d’objecte</li> <li>Una línia fina de color blau fosc indica que l’objecte no està detectat en aquest moment</li> <li>Una línia fina de color gris indica que l’objecte està detectat com a estacionari</li> <li>Una línia gruixuda indica que l’objecte és el subjecte de l’autoseguiment (quan està activat)</li>"
|
||||
},
|
||||
"desc": "Mostra les caixes delimitadores al voltant dels objectes rastrejats"
|
||||
},
|
||||
@ -392,7 +413,8 @@
|
||||
"desc": "Mostra requadres al voltant de les àrees on s'ha detectat moviment",
|
||||
"tips": "<p><strong>Caixes de moviment</strong></p><br><p>Es sobreposaran requadres vermells a les àrees del fotograma on actualment s’estigui detectant moviment.</p>"
|
||||
},
|
||||
"detectorDesc": "Frigate fa servir els teus detectors ({{detectors}}) per a detectar objectes a les imatges de la teva càmera."
|
||||
"detectorDesc": "Frigate fa servir els teus detectors ({{detectors}}) per a detectar objectes a les imatges de la teva càmera.",
|
||||
"desc": "La vista de depuració mostra en temps real els objectes rastrejats i les seves estadístiques. La llista d’objectes mostra un resum amb retard temporal dels objectes detectats."
|
||||
},
|
||||
"users": {
|
||||
"table": {
|
||||
@ -500,7 +522,8 @@
|
||||
},
|
||||
"title": "Configuració d'instantànies",
|
||||
"documentation": "Llegir la documentació",
|
||||
"desc": "Per a enviar a Frigate+ fa falta que tan la instantània com la instantània <code>clean_copy</code> estiguin habilitades a la configuració."
|
||||
"desc": "Per a enviar a Frigate+ fa falta que tan la instantània com la instantània <code>clean_copy</code> estiguin habilitades a la configuració.",
|
||||
"cleanCopyWarning": "Algunes càmeres tenen les captures d'imatge activades però la còpia neta desactivada. Cal habilitar <code>clean_copy</code> a la configuració de captures per poder enviar imatges d’aquestes càmeres a Frigate+."
|
||||
},
|
||||
"modelInfo": {
|
||||
"baseModel": "Model base",
|
||||
@ -516,7 +539,8 @@
|
||||
},
|
||||
"loadingAvailableModels": "Carregant models disponibles…",
|
||||
"loading": "Carregant informació del model…",
|
||||
"error": "No s'ha pogut carregar la informació del model"
|
||||
"error": "No s'ha pogut carregar la informació del model",
|
||||
"modelSelect": "Els models disponibles a Frigate+ es poden seleccionar aquí. Tingues en compte que només es poden triar els models compatibles amb la configuració actual del detector."
|
||||
},
|
||||
"apiKey": {
|
||||
"plusLink": "Llegeix més sobre Frigate+",
|
||||
@ -537,7 +561,8 @@
|
||||
"semanticSearch": {
|
||||
"modelSize": {
|
||||
"small": {
|
||||
"title": "petit"
|
||||
"title": "petit",
|
||||
"desc": "L’opció <em>small</em> fa servir una versió quantitzada del model que consumeix menys RAM i s’executa més ràpidament a la CPU, amb una diferència gairebé inapreciable en la qualitat de les incrustacions (embeddings)."
|
||||
},
|
||||
"label": "Mida del model",
|
||||
"large": {
|
||||
@ -574,12 +599,14 @@
|
||||
"desc": "La mida del model utilitzat per al reconeixement facial."
|
||||
},
|
||||
"readTheDocumentation": "Llegir la documentació",
|
||||
"title": "Reconeixement de rostres"
|
||||
"title": "Reconeixement de rostres",
|
||||
"desc": "El reconeixement facial permet a les persones assignar noms i quan es reconeix la seva cara Frigate assignarà el nom de la persona com a subetiqueta. Aquesta informació s'inclou en la interfície d'usuari, filtres, així com en les notificacions."
|
||||
},
|
||||
"unsavedChanges": "Canvis dels paràmetres complementaris sense desar",
|
||||
"licensePlateRecognition": {
|
||||
"readTheDocumentation": "Llegir la documentació",
|
||||
"title": "Reconeixement de matrícules"
|
||||
"title": "Reconeixement de matrícules",
|
||||
"desc": "Frigate pot reconèixer les plaques de matrícula en vehicles i afegir automàticament els caràcters detectats al camp de la placa reconeguda o un nom conegut com a sub_etiqueta en objectes que són de tipus cotxe. Un cas d'ús comú pot ser llegir les plaques de matrícula dels cotxes que entren en un lloc o els cotxes que passen per un carrer."
|
||||
},
|
||||
"birdClassification": {
|
||||
"title": "Classificació d'ocells",
|
||||
|
||||
@ -97,7 +97,8 @@
|
||||
"storageUsed": "Emmagatzematge",
|
||||
"title": "Emmagatzematge de càmera",
|
||||
"unused": {
|
||||
"title": "Sense utilitzar"
|
||||
"title": "Sense utilitzar",
|
||||
"tips": "Aquest valor pot no de forma exacta representar l'espai lliure disponible a Frigate si tens altres fitxers emmagatzemats en la vostra unitat més enllà dels registres de Frigate. Frigate no rastreja l'ús d'emmagatzematge extern als seus registres."
|
||||
},
|
||||
"percentageOfTotalUsed": "Percentatge del total"
|
||||
},
|
||||
|
||||
@ -41,7 +41,7 @@
|
||||
"aria": "Vybrat trénink"
|
||||
},
|
||||
"description": {
|
||||
"addFace": "Průvodce přidáním nové kolekce do Knihovny obličejů.",
|
||||
"addFace": "Prúvodce přidání nové kolekce do Knižnice obličejů.",
|
||||
"placeholder": "Zadejte název pro tuto kolekci",
|
||||
"invalidName": "Neplatný název. Názvy mohou obsahovat pouze písmena, čísla, mezery, apostrofy, podtržítka a pomlčky."
|
||||
},
|
||||
|
||||
@ -623,7 +623,7 @@
|
||||
"title": "Vogel Klassifizierung",
|
||||
"desc": "Die Vogelklassifizierung identifiziert bekannte Vögel mithilfe eines quantisierten Tensorflow-Modells. Wenn ein bekannter Vogel erkannt wird, wird sein allgemeiner Name als sub_label hinzugefügt. Diese Informationen sind in der Benutzeroberfläche, in Filtern und in Benachrichtigungen enthalten."
|
||||
},
|
||||
"title": "Verbesserugsseinstellungen",
|
||||
"title": "Anreicherungseinstellungen",
|
||||
"unsavedChanges": "Ungesicherte geänderte Verbesserungseinstellungen",
|
||||
"semanticSearch": {
|
||||
"reindexNow": {
|
||||
|
||||
52
web/public/locales/el/audio.json
Normal file
52
web/public/locales/el/audio.json
Normal file
@ -0,0 +1,52 @@
|
||||
{
|
||||
"speech": "Διάλογος",
|
||||
"babbling": "Φλυαρία",
|
||||
"yell": "Φωνές",
|
||||
"bellow": "Κάτω από",
|
||||
"whoop": "Κραυγή",
|
||||
"whispering": "Ψίθυρος",
|
||||
"laughter": "Γέλια",
|
||||
"snicker": "Χαχανιτά",
|
||||
"crying": "Κλάμα",
|
||||
"dog": "Σκύλος",
|
||||
"cat": "Γάτα",
|
||||
"pig": "Γουρούνι",
|
||||
"oink": "Κραυγή Γουρουνιού",
|
||||
"moo": "Μουγκανιτό",
|
||||
"cowbell": "Κουδούνι Αγελάδας",
|
||||
"horse": "Άλογο",
|
||||
"goat": "Κατσίκα",
|
||||
"chicken": "Πρόβατο",
|
||||
"child_singing": "Τραγούδι Παιδιού",
|
||||
"sneeze": "Φτέρνισμα",
|
||||
"sniff": "Όσφρηση",
|
||||
"run": "Τρέξιμο",
|
||||
"shuffle": "Ανακάτεμα",
|
||||
"footsteps": "Βήματα",
|
||||
"chewing": "Μάσημα",
|
||||
"biting": "Δάγκωμα",
|
||||
"bicycle": "Ποδήλατο",
|
||||
"car": "Αυτοκίνητο",
|
||||
"motorcycle": "Μηχανή",
|
||||
"breathing": "Αναπνοή",
|
||||
"snoring": "Ροχαλιτό",
|
||||
"honk": "Κόρνα",
|
||||
"wild_animals": "Άγρια Ζώα",
|
||||
"roaring_cats": "Κραυγές από Γάτες",
|
||||
"roar": "Βρυχηθμός",
|
||||
"bird": "Πουλί",
|
||||
"pigeon": "Περιστέρι",
|
||||
"crow": "Κοράκι",
|
||||
"caw": "Αγελάδα",
|
||||
"owl": "Κουκουβάγια",
|
||||
"flapping_wings": "Φτερούγισμα",
|
||||
"dogs": "Σκυλιά",
|
||||
"rats": "Ποντίκια",
|
||||
"guitar": "Κιθάρα",
|
||||
"electric_guitar": "Ηλεκτρική Κιθάρα",
|
||||
"bass_guitar": "Μπάσο",
|
||||
"acoustic_guitar": "Ακουστική Κιθάρα",
|
||||
"classical_music": "Κλασική Μουσική",
|
||||
"opera": "Όπερα",
|
||||
"electronic_music": "Ηλεκτρονική Μουσική"
|
||||
}
|
||||
8
web/public/locales/el/common.json
Normal file
8
web/public/locales/el/common.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"time": {
|
||||
"untilForTime": "Έως {{time}}",
|
||||
"untilForRestart": "Μέχρι να γίνει επανεκκίνηση του Frigate.",
|
||||
"untilRestart": "Μέχρι να γίνει επανεκκίνηση",
|
||||
"justNow": "Μόλις τώρα"
|
||||
}
|
||||
}
|
||||
10
web/public/locales/el/components/auth.json
Normal file
10
web/public/locales/el/components/auth.json
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"form": {
|
||||
"user": "Όνομα χρήστη",
|
||||
"password": "Κωδικός",
|
||||
"login": "Σύνδεση",
|
||||
"errors": {
|
||||
"usernameRequired": "Απαιτείται όνομα χρήστη"
|
||||
}
|
||||
}
|
||||
}
|
||||
6
web/public/locales/el/components/camera.json
Normal file
6
web/public/locales/el/components/camera.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"group": {
|
||||
"add": "Προσθήκη ομάδας καμερών",
|
||||
"label": "Ομάδες καμερών"
|
||||
}
|
||||
}
|
||||
38
web/public/locales/el/components/dialog.json
Normal file
38
web/public/locales/el/components/dialog.json
Normal file
@ -0,0 +1,38 @@
|
||||
{
|
||||
"restart": {
|
||||
"restarting": {
|
||||
"content": "Αυτή η σελίδα θα φορτώσει ξανά σε {{countdown}} δευτερόλεπτα.",
|
||||
"title": "Το Frigate κάνει επανεκκίνηση",
|
||||
"button": "Αναγκαστική επαναφόρτωση τώρα"
|
||||
},
|
||||
"title": "Είστε σίγουροι ότι θέλετε να επανεκκινήσετε το Frigate;",
|
||||
"button": "Επανεκκίνηση"
|
||||
},
|
||||
"explore": {
|
||||
"plus": {
|
||||
"submitToPlus": {
|
||||
"label": "Υποβολή σε Frigate+",
|
||||
"desc": "Τα αντικείμενα σε τοποθεσίες που θέλετε να αποφύγετε δεν είναι ψευδώς θετικά. Η υποβολή τους ως ψευδώς θετικά θα προκαλέσει σύγχυση στο μοντέλο."
|
||||
},
|
||||
"review": {
|
||||
"question": {
|
||||
"label": "Επιβεβαιώστε αυτήν την ετικέτα για το Frigate Plus",
|
||||
"ask_a": "Είναι αυτό το αντικείμενο <code>{{label}}</code>;",
|
||||
"ask_an": "Είναι αυτό το αντικείμενο <code>{{label}}</code>;",
|
||||
"ask_full": "Είναι αυτό το αντικείμενο <code>{{untranslatedLabel}}</code> ({{translatedLabel}});"
|
||||
},
|
||||
"state": {
|
||||
"submitted": "Υποβλήθηκε"
|
||||
}
|
||||
}
|
||||
},
|
||||
"video": {
|
||||
"viewInHistory": "Προβολή στο Ιστορικό"
|
||||
}
|
||||
},
|
||||
"export": {
|
||||
"time": {
|
||||
"fromTimeline": "Επιλογή από Χρονολόγιο"
|
||||
}
|
||||
}
|
||||
}
|
||||
6
web/public/locales/el/components/filter.json
Normal file
6
web/public/locales/el/components/filter.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"filter": "Φίλτρο",
|
||||
"labels": {
|
||||
"label": "Ετικέτες"
|
||||
}
|
||||
}
|
||||
8
web/public/locales/el/components/icons.json
Normal file
8
web/public/locales/el/components/icons.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"iconPicker": {
|
||||
"selectIcon": "Επιλέξτε ένα εικονίδιο",
|
||||
"search": {
|
||||
"placeholder": "Αναζήτηση εικονιδίου…"
|
||||
}
|
||||
}
|
||||
}
|
||||
10
web/public/locales/el/components/input.json
Normal file
10
web/public/locales/el/components/input.json
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"button": {
|
||||
"downloadVideo": {
|
||||
"label": "Λήψη βίντεο",
|
||||
"toast": {
|
||||
"success": "Το βίντεο αξιολόγησης έχει αρχίσει να κατεβαίνει."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
42
web/public/locales/el/components/player.json
Normal file
42
web/public/locales/el/components/player.json
Normal file
@ -0,0 +1,42 @@
|
||||
{
|
||||
"noRecordingsFoundForThisTime": "Δεν βρέθηκαν εγγραφές για αυτήν την ώρα",
|
||||
"noPreviewFound": "Δεν βρέθηκε προεπισκόπηση",
|
||||
"noPreviewFoundFor": "Δεν βρέθηκε προεπισκόπηση για {{cameraName}}",
|
||||
"submitFrigatePlus": {
|
||||
"title": "Να υποβληθεί αυτό το καρέ στο Frigate+;",
|
||||
"submit": "Υποβολή"
|
||||
},
|
||||
"livePlayerRequiredIOSVersion": "iOS 17.1 ή μεγαλύτερο χρειάζεται για αυτό τον τύπο του live stream.",
|
||||
"streamOffline": {
|
||||
"title": "Η μετάδοση είναι εκτός λειτουργίας",
|
||||
"desc": "Δεν έχουν ληφθεί καρέ στη ροή {{cameraName}} <code>detect</code>, ελέγξτε τα αρχεία καταγραφής σφαλμάτων"
|
||||
},
|
||||
"cameraDisabled": "Η Κάμερα έχει απενεργοποιηθεί",
|
||||
"stats": {
|
||||
"streamType": {
|
||||
"title": "Τύπος μετάδοσης:",
|
||||
"short": "Τύπος"
|
||||
},
|
||||
"bandwidth": {
|
||||
"title": "Ταχύτητα:",
|
||||
"short": "Ταχύτητα"
|
||||
},
|
||||
"latency": {
|
||||
"title": "Καθυστέρηση:",
|
||||
"value": "{{seconds}} δευτερόλεπτα",
|
||||
"short": {
|
||||
"title": "Καθυστέρηση",
|
||||
"value": "{{seconds}} δευτερόλεπτα"
|
||||
}
|
||||
},
|
||||
"totalFrames": "Συνολικός αριθμός Καρέ:",
|
||||
"droppedFrames": {
|
||||
"title": "Απορριφθέντα καρέ:",
|
||||
"short": {
|
||||
"title": "Απορριφθέντα",
|
||||
"value": "{{droppedFrames}} καρέ"
|
||||
}
|
||||
},
|
||||
"decodedFrames": "Αποκωδικοποιημένα Καρέ:"
|
||||
}
|
||||
}
|
||||
8
web/public/locales/el/objects.json
Normal file
8
web/public/locales/el/objects.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"person": "Άτομο",
|
||||
"bicycle": "Ποδήλατο",
|
||||
"car": "Αυτοκίνητο",
|
||||
"motorcycle": "Μηχανή",
|
||||
"airplane": "Αεροπλάνο",
|
||||
"bird": "Πουλί"
|
||||
}
|
||||
5
web/public/locales/el/views/configEditor.json
Normal file
5
web/public/locales/el/views/configEditor.json
Normal file
@ -0,0 +1,5 @@
|
||||
{
|
||||
"documentTitle": "Επεξεργαστής ρυθμίσεων - Frigate",
|
||||
"configEditor": "Επεξεργαστής Ρυθμίσεων",
|
||||
"saveAndRestart": "Αποθήκευση και επανεκκίνηση"
|
||||
}
|
||||
8
web/public/locales/el/views/events.json
Normal file
8
web/public/locales/el/views/events.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"alerts": "Ειδοποιήσεις",
|
||||
"detections": "Εντοπισμοί",
|
||||
"motion": {
|
||||
"label": "Κίνηση",
|
||||
"only": "Κίνηση μόνο"
|
||||
}
|
||||
}
|
||||
3
web/public/locales/el/views/explore.json
Normal file
3
web/public/locales/el/views/explore.json
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
"documentTitle": "Εξερευνήστε - Frigate"
|
||||
}
|
||||
5
web/public/locales/el/views/exports.json
Normal file
5
web/public/locales/el/views/exports.json
Normal file
@ -0,0 +1,5 @@
|
||||
{
|
||||
"documentTitle": "Εξαγωγή - Frigate",
|
||||
"search": "Αναζήτηση",
|
||||
"deleteExport": "Διαγραφή εξαγωγής"
|
||||
}
|
||||
10
web/public/locales/el/views/faceLibrary.json
Normal file
10
web/public/locales/el/views/faceLibrary.json
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"description": {
|
||||
"addFace": "Οδηγός για την προσθήκη μιας νέας συλλογής στη Βιβλιοθήκη Προσώπων.",
|
||||
"placeholder": "Εισαγάγετε ένα όνομα για αυτήν τη συλλογή",
|
||||
"invalidName": "Μη έγκυρο όνομα. Τα ονόματα μπορούν να περιλαμβάνουν γράμματα, αριθμούς, κενό διάστημα, απόστροφο, παύλα, κάτω παύλα."
|
||||
},
|
||||
"details": {
|
||||
"person": "Άτομο"
|
||||
}
|
||||
}
|
||||
6
web/public/locales/el/views/live.json
Normal file
6
web/public/locales/el/views/live.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"documentTitle": "Ζωντανά - Frigate",
|
||||
"twoWayTalk": {
|
||||
"enable": "Ενεργοποίηση αμφίδρομης επικοινωνίας"
|
||||
}
|
||||
}
|
||||
6
web/public/locales/el/views/recording.json
Normal file
6
web/public/locales/el/views/recording.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"filter": "Φίλτρο",
|
||||
"export": "Εξαγωγή",
|
||||
"calendar": "Ημερολόγιο",
|
||||
"filters": "Φίλτρα"
|
||||
}
|
||||
7
web/public/locales/el/views/search.json
Normal file
7
web/public/locales/el/views/search.json
Normal file
@ -0,0 +1,7 @@
|
||||
{
|
||||
"search": "Αναζήτηση",
|
||||
"savedSearches": "Αποθηκευμένες Αναζητήσεις",
|
||||
"button": {
|
||||
"clear": "Εκκαθάριση αναζήτησης"
|
||||
}
|
||||
}
|
||||
7
web/public/locales/el/views/settings.json
Normal file
7
web/public/locales/el/views/settings.json
Normal file
@ -0,0 +1,7 @@
|
||||
{
|
||||
"documentTitle": {
|
||||
"default": "Ρυθμίσεις - Frigate",
|
||||
"authentication": "Ρυθμίσεις ελέγχου ταυτοποίησης - Frigate",
|
||||
"camera": "Ρυθμίσεις Κάμερας - Frigate"
|
||||
}
|
||||
}
|
||||
5
web/public/locales/el/views/system.json
Normal file
5
web/public/locales/el/views/system.json
Normal file
@ -0,0 +1,5 @@
|
||||
{
|
||||
"documentTitle": {
|
||||
"cameras": "Στατιστικά Καμερών - Frigate"
|
||||
}
|
||||
}
|
||||
@ -37,5 +37,22 @@
|
||||
"hair_dryer": "Hiustenkuivaaja",
|
||||
"toothbrush": "Hammasharja",
|
||||
"clock": "Kello",
|
||||
"bark": "Haukku"
|
||||
"bark": "Haukku",
|
||||
"chant": "Laulaa",
|
||||
"mantra": "Mantra",
|
||||
"child_singing": "Lapsi laulaa",
|
||||
"synthetic_singing": "Synteettinen laulu",
|
||||
"rapping": "Räppi",
|
||||
"humming": "Humina",
|
||||
"groan": "Voihkia",
|
||||
"grunt": "Murahtaa",
|
||||
"whistling": "Vihellys",
|
||||
"breathing": "Hengitys",
|
||||
"wheeze": "Vinkua",
|
||||
"snoring": "Kuorsaus",
|
||||
"gasp": "Haukkoa henkeä",
|
||||
"pant": "Huohottaa",
|
||||
"snort": "Tuhahtaa",
|
||||
"cough": "Yskä",
|
||||
"sneeze": "Aivastaa"
|
||||
}
|
||||
|
||||
@ -57,12 +57,15 @@
|
||||
"warning": "Jatkuva suoratoisto voi lisätä kaistanleveyden käyttöä ja suorituskykyongelmia. Käytä varoen."
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"placeholder": "Valitse toiston tyyppi"
|
||||
},
|
||||
"compatibilityMode": {
|
||||
"label": "Yhteensopivuustila",
|
||||
"desc": "Ota tämä vaihtoehto käyttöön vain, jos kamerasi live-suoratoistossa näkyy väriartefakteja ja kuvan oikealla puolella on vinoviiva."
|
||||
}
|
||||
},
|
||||
"stream": "Kuvavirta",
|
||||
"placeholder": "Valitse kuvavirta"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@ -34,7 +34,37 @@
|
||||
"time": {
|
||||
"fromTimeline": "Valitse aikajanalta",
|
||||
"lastHour_one": "Viimeinen tunti",
|
||||
"lastHour_other": "Viimeiset {{count}} tuntia"
|
||||
"lastHour_other": "Viimeiset {{count}} tuntia",
|
||||
"start": {
|
||||
"title": "Aloitusaika",
|
||||
"label": "Valitse aloitusaika"
|
||||
},
|
||||
"end": {
|
||||
"title": "Lopetusaika",
|
||||
"label": "Valitse lopetusaika"
|
||||
},
|
||||
"custom": "Mukautettu"
|
||||
},
|
||||
"name": {
|
||||
"placeholder": "Nimeä vienti"
|
||||
},
|
||||
"select": "Valitse",
|
||||
"export": "Vie",
|
||||
"selectOrExport": "Valitse tai Vie",
|
||||
"toast": {
|
||||
"error": {
|
||||
"failed": "Viennin aloitus epäonnistui: {{error}}",
|
||||
"endTimeMustAfterStartTime": "Lopetusajan pitää olla aloitusajan jälkeen",
|
||||
"noVaildTimeSelected": "Sopivaa aikaikkunaa ei valittuna"
|
||||
},
|
||||
"success": "Vienti käynnistettiin onnistuneesti. Katso tiedosto /export kansiossa."
|
||||
},
|
||||
"fromTimeline": {
|
||||
"saveExport": "Tallenna vienti",
|
||||
"previewExport": "Esikatsele vientiä"
|
||||
}
|
||||
},
|
||||
"streaming": {
|
||||
"label": "Kuvavirta"
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,5 +26,29 @@
|
||||
"title": "Kaikki alueet",
|
||||
"short": "Alueet"
|
||||
}
|
||||
},
|
||||
"timeRange": "Aikaikkuna",
|
||||
"subLabels": {
|
||||
"label": "Alinimikkeet",
|
||||
"all": "Kaikki alinimikkeet"
|
||||
},
|
||||
"score": "Piste",
|
||||
"estimatedSpeed": "Arvioitu nopeus {{unit}}",
|
||||
"features": {
|
||||
"label": "Piirteet",
|
||||
"hasVideoClip": "Videoleike löytyy",
|
||||
"submittedToFrigatePlus": {
|
||||
"label": "Lähetetty Frigate+:aan"
|
||||
},
|
||||
"hasSnapshot": "Tilannekuva löytyy"
|
||||
},
|
||||
"sort": {
|
||||
"label": "Järjestä",
|
||||
"dateAsc": "Päivämäärä (Nouseva)",
|
||||
"dateDesc": "Päivämäärä (Laskeva)",
|
||||
"scoreAsc": "Kohteen pisteet (Nouseva)",
|
||||
"scoreDesc": "Kohteen pisteet (Laskeva)",
|
||||
"speedAsc": "Arvioitu nopeus (Nouseva)",
|
||||
"speedDesc": "Arvioitu nopeus (Laskeva)"
|
||||
}
|
||||
}
|
||||
|
||||
@ -22,7 +22,30 @@
|
||||
"short": "Kaistanleveys"
|
||||
},
|
||||
"latency": {
|
||||
"title": "Latenssi:"
|
||||
"title": "Latenssi:",
|
||||
"value": "{{seconds}} sekuntia",
|
||||
"short": {
|
||||
"value": "{{seconds}} sek",
|
||||
"title": "Latenssi"
|
||||
}
|
||||
},
|
||||
"totalFrames": "Kehyksiä yhteensä:",
|
||||
"droppedFrames": {
|
||||
"title": "Pudotettuja kehyksiä:",
|
||||
"short": {
|
||||
"title": "Pudotettu",
|
||||
"value": "{{droppedFrames}} kehystä"
|
||||
}
|
||||
},
|
||||
"decodedFrames": "Dekoodatut kehykset:",
|
||||
"droppedFrameRate": "Pudotettujen kehysten nopeus:"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"submittedFrigatePlus": "Onnistuneesti lähetetty Frigate+:aan"
|
||||
},
|
||||
"error": {
|
||||
"submitFrigatePlusFailed": "Frigate+:aan lähetys epäonnistui"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,7 +20,8 @@
|
||||
"trackedObjectsProcessed": "Käsitellyt seuratut objektit: ",
|
||||
"thumbnailsEmbedded": "Kuvakkeet sisällytetty: ",
|
||||
"descriptionsEmbedded": "Kuvaukset sisällytetty: "
|
||||
}
|
||||
},
|
||||
"context": "Selausta voidaan käyttää sen jälkeen kun seurattavien kohteiden uudelleenindeksöinti on valmistunut."
|
||||
},
|
||||
"downloadingModels": {
|
||||
"context": "Frigate lataa semanttista hakua varten vaadittavat upotusmallit. Tämä saattaa viedä useamman minuutin, riippuen yhteytesi nopeudesta.",
|
||||
@ -81,5 +82,11 @@
|
||||
"snapshot": "kuvankaappaus",
|
||||
"video": "video",
|
||||
"object_lifecycle": "kohteen elinkaari"
|
||||
},
|
||||
"itemMenu": {
|
||||
"downloadSnapshot": {
|
||||
"label": "Lataa kuvankaappaus",
|
||||
"aria": "Lataa kuvankaappaus"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -13,13 +13,14 @@
|
||||
"faceDesc": "Lisätiedot kohteesta, josta tämä kasvokuva tallennettiin",
|
||||
"person": "Henkilö",
|
||||
"timestamp": "Aikaleima",
|
||||
"subLabelScore": "",
|
||||
"face": ""
|
||||
"subLabelScore": "Alinimikkeen pisteet",
|
||||
"face": "Kasvojen yksityiskohdat"
|
||||
},
|
||||
"documentTitle": "Kasvokirjasto - Frigate",
|
||||
"deleteFaceAttempts": {
|
||||
"desc_one": "Oletko varma, että haluat poistaa {{count}} kasvon? Tätä toimintoa ei voi perua.",
|
||||
"desc_other": "Oletko varma, että haluat poistaa {{count}} kasvoa? Tätä toimintoa ei voi perua."
|
||||
"desc_other": "Oletko varma, että haluat poistaa {{count}} kasvoa? Tätä toimintoa ei voi perua.",
|
||||
"title": "Poista kasvot"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
@ -27,5 +28,25 @@
|
||||
"deletedFace_other": "{{count}} kasvoa poistettu onnistuneesti."
|
||||
}
|
||||
},
|
||||
"selectItem": "Valitse {{item}}"
|
||||
"selectItem": "Valitse {{item}}",
|
||||
"train": {
|
||||
"empty": "Ei viimeaikaisia kasvojentunnistusyrityksiä",
|
||||
"title": "Koulutus"
|
||||
},
|
||||
"collections": "Kokoelmat",
|
||||
"steps": {
|
||||
"faceName": "Anna nimi kasvoille",
|
||||
"uploadFace": "Lähetä kasvokuva",
|
||||
"nextSteps": "Seuraavat vaiheet"
|
||||
},
|
||||
"createFaceLibrary": {
|
||||
"title": "Luo kokoelma",
|
||||
"desc": "Luo uusi kokoelma",
|
||||
"new": "Luo uusi kasvo"
|
||||
},
|
||||
"selectFace": "Valitse kasvo",
|
||||
"deleteFaceLibrary": {
|
||||
"title": "Poista nimi",
|
||||
"desc": "Haluatko varmasti poistaa kokoelman {{name}}? Tämä poistaa pysyvästi kaikki liitetyt kasvot."
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user