Compare commits

...

29 Commits

Author SHA1 Message Date
dependabot[bot]
079563a3ee
Merge 35b4339fd1c2c8463b4d5134433ddf03cca5ac98 into a510ea903675327dbb1d752403efbc066fe29272 2025-11-05 15:49:33 -08:00
Nicolas Mowen
a510ea9036
Review card refactor (#20813)
* Use the review card in event timeline popover

* Show review title in review card
2025-11-05 09:48:47 -06:00
Josh Hawkins
e1bc7360ad
Form validation tweaks (#20812)
* Always show ID field when editing a trigger

* use onBlur method for form validation

this will prevent the trigger ID from expanding too soon when a user is typing the friendly name
2025-11-05 09:18:10 -06:00
Josh Hawkins
4638c22c16
UI tweaks (#20811)
* camera wizard input mobile font zooming

* ensure the selected page is visible when navigating via url on mobile

* Filter detail stream to only show items from within the review item

* remove incorrect classes causing extra scroll in detail stream

* change button label

* fix mobile menu button highlight issue

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-11-05 07:49:31 -07:00
Nicolas Mowen
81faa8899d
Classification Improvements (#20807)
* Don't show model selection or back button when in multi select mode

* Add dialog to edit classification models

* Fix header spacing

* Cleanup desktop

* Incrase max number of object classifications

* fix iOS mobile card

* Cleanup
2025-11-05 07:11:12 -07:00
Nicolas Mowen
043bd9e6ee
Fix jetson build (#20808)
* Fix jetson build

* Set numpy version in model wheels

* Use constraint instead

* Simplify
2025-11-05 07:10:56 -07:00
Artem Vladimirov
9f0b6004f2
fix: add pluralization for deletedModel toast message (#20803)
* fix: add pluralization for deletedModel toast message

* revert ru translation

---------

Co-authored-by: Artem Vladimirov <a.vladimirov@small.kz>
2025-11-05 05:02:54 -07:00
Nicolas Mowen
b751228476
Various Tweaks (#20800)
* Fix incorrectly picking start time when date was selected

* Implement shared file locking utility

* Cleanup
2025-11-04 17:06:14 -06:00
Nicolas Mowen
3b2d136665
UI Tweaks (#20791)
* Add tooltip for classification group

* Don't portal upload dialog when not in fullscreen
2025-11-04 10:54:05 -06:00
Josh Hawkins
e7394d0dc1
Form validation tweaks (#20790)
* ensure id field is expanded on form errors

* only validate id field when name field has no errors

* use ref instead

* all numeric is an invalid name
2025-11-04 08:57:47 -06:00
Josh Hawkins
2e288109f4
Review tweaks (#20789)
* use alerts/detections colors for dots and add back blue border

* add alerts/detections colored dot next to event icons

* add margin for border
2025-11-04 08:45:45 -06:00
Josh Hawkins
256817d5c2
Make events summary endpoint DST-aware (#20786) 2025-11-03 17:54:33 -07:00
Nicolas Mowen
84409eab7e
Various fixes (#20785)
* Catch case where detector overflows

* Add more debug logs

* Cleanup

* Adjust no class wording

* Adjustments
2025-11-03 18:42:59 -06:00
Josh Hawkins
9e83888133
Fix recordings summary for DST (#20784)
* make recordings summary endpoints DST aware

* remove unused

* clean up
2025-11-03 17:30:56 -07:00
Abinila Siva
85f7138361
update installation code to hold SDK 2.1 version (#20781) 2025-11-03 13:23:51 -07:00
Nicolas Mowen
fc1cad2872
Adjust LPR packages for licensing (#20780) 2025-11-03 14:11:02 -06:00
Nicolas Mowen
5529432856
Various fixes (#20774)
* Change order of deletion

* Add debug log for camera enabled

* Add more face debug logs

* Set jetson numpy version
2025-11-03 10:05:03 -06:00
Josh Hawkins
59963fc47e
Camera Wizard tweaks (#20773)
* add switch to use go2rtc ffmpeg mode

* i18n

* move testing state outside of button
2025-11-03 08:42:38 -07:00
Nicolas Mowen
31fa87ce73
Correctly remove classification model from config (#20772)
* Correctly remove classification model from config

* Undo

* fix

* Use existing config update API and dynamically remove models that were running

* Set update message for face
2025-11-03 08:01:30 -07:00
Nicolas Mowen
740c618240
Fix review summary for DST (#20770)
* Fix review summary for DST

* Fix
2025-11-03 07:34:47 -06:00
Nicolas Mowen
4f76b34f44
Classification fixes (#20771)
* Fully delete a model

* Fix deletion dialog

* Fix classification back step

* Adjust selection gradient

* Fix

* Fix
2025-11-03 07:34:06 -06:00
Josh Hawkins
d44340eca6
Tracked Object Details pane tweaks (#20762)
* normalize path and points sizes

* fix bounding box display to only show on actual points that have a box

* add support for using snapshots
2025-11-02 06:48:43 -07:00
GuoQing Liu
aff82f809c
feat: add search filter group audio i18n (#20760) 2025-11-02 07:45:24 -06:00
Josh Hawkins
1e50d83d06
create i18n key for list separator and use in zones (#20749) 2025-11-01 12:20:32 -06:00
Josh Hawkins
36fb27ef56
Refactor Tracked Object Details dialog (#20748)
* detail stream settings

* remove old review detail dialog

* change layout

* use detail stream in tracking details

* reusable tabs component

* pass in tabs for desktop

* fix object selection and time updating

* i18n

* aspect fixes

* include tolerance for displaying of path and zone

some browsers (firefox and probably brave) intentionally reduce precision of seeking with currentTime for privacy reasons

* detail stream seeking fixes

* tracking details seeking fixes

* layout tweaks

* add download button back for now

* remove

* remove

* snapshot is now default tab
2025-11-01 09:19:30 -05:00
Nicolas Mowen
9937a7cc3d
Add ability to delete classification models (#20747)
* fix typo

* Add ability to delete classification models
2025-11-01 09:11:24 -05:00
Nicolas Mowen
7aac6b4f21
Don't remove tensorflow on trt (#20743) 2025-10-31 16:17:41 -05:00
Nicolas Mowen
338b681ed0
Various Tweaks (#20742)
* Pull context size from openai models

* Adjust wording based on type of model

* Instruct to not use parenthesis

* Simplify genai config

* Don't use GPU for training
2025-10-31 12:40:31 -06:00
dependabot[bot]
35b4339fd1
Bump @docusaurus/core from 3.8.1 to 3.9.2 in /docs
Bumps [@docusaurus/core](https://github.com/facebook/docusaurus/tree/HEAD/packages/docusaurus) from 3.8.1 to 3.9.2.
- [Release notes](https://github.com/facebook/docusaurus/releases)
- [Changelog](https://github.com/facebook/docusaurus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/facebook/docusaurus/commits/v3.9.2/packages/docusaurus)

---
updated-dependencies:
- dependency-name: "@docusaurus/core"
  dependency-version: 3.9.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 12:00:24 +00:00
81 changed files with 7950 additions and 2290 deletions

View File

@ -5,21 +5,27 @@ set -euxo pipefail
SQLITE3_VERSION="3.46.1"
PYSQLITE3_VERSION="0.5.3"
# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
if ! dpkg -l | grep -q libsqlite3-dev; then
echo "Installing libsqlite3-dev for compilation..."
apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
fi
# Fetch the pre-built sqlite amalgamation instead of building from source
if [[ ! -d "sqlite" ]]; then
mkdir sqlite
cd sqlite
# Download the pre-built amalgamation from sqlite.org
# For SQLite 3.46.1, the amalgamation version is 3460100
SQLITE_AMALGAMATION_VERSION="3460100"
wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
unzip sqlite-amalgamation.zip
mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
rm sqlite-amalgamation.zip
cd ../
fi

View File

@ -2,9 +2,9 @@
set -e
# Download the MxAccl for Frigate github release
wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
rm /tmp/mxaccl.zip
# Install Python dependencies

View File

@ -56,7 +56,7 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
Levenshtein==0.26.*
rapidfuzz==3.12.*
# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*

View File

@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
# Update and install memx-drivers
echo "Installing memx-drivers..."
# Update and install specific SDK 2.1 packages
echo "Installing MemryX SDK 2.1 packages..."
sudo apt update
sudo apt install -y memx-drivers
sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
# Hold packages to prevent automatic upgrades
sudo apt-mark hold memx-drivers memx-accl mxa-manager
# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@ -37,11 +40,5 @@ fi
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
# Install other runtime packages
packages=("memx-accl" "mxa-manager")
for pkg in "${packages[@]}"; do
echo "Installing $pkg..."
sudo apt install -y "$pkg"
done
echo "MemryX SDK 2.1 installation complete!"
echo "MemryX installation complete!"

View File

@ -21,7 +21,7 @@ FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 uninstall -y onnxruntime tensorflow-cpu \
pip3 uninstall -y onnxruntime \
&& pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / /

View File

@ -112,7 +112,7 @@ RUN apt-get update \
&& apt-get install -y protobuf-compiler libprotobuf-dev \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt
FROM wget AS jetson-ffmpeg
ARG DEBIAN_FRONTEND
@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
pip3 uninstall -y onnxruntime \
&& pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
&& pip3 install -U /deps/trt-wheels/*.whl \
&& pip3 install -U /deps/trt-model-wheels/*.whl \
&& ldconfig
WORKDIR /opt/frigate/

View File

@ -13,7 +13,6 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
tensorflow==2.19.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -1 +1,2 @@
cuda-python == 12.6.*; platform_machine == 'aarch64'
numpy == 1.26.*; platform_machine == 'aarch64'

View File

@ -10,7 +10,6 @@ Object classification allows you to train a custom MobileNetV2 classification mo
Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
## Classes

View File

@ -10,7 +10,6 @@ State classification allows you to train a custom MobileNetV2 classification mod
State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
## Classes

5245
docs/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,7 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.7.0",
"@docusaurus/core": "^3.9.2",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@docusaurus/preset-classic": "^3.7.0",
"@docusaurus/theme-mermaid": "^3.6.3",

View File

@ -37,7 +37,6 @@ from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
clean_camera_user_pass,
flatten_config_data,
get_tz_modifiers,
process_config_query_string,
update_yaml_file_bulk,
)
@ -48,6 +47,7 @@ from frigate.util.services import (
restart_frigate,
vainfo_hwaccel,
)
from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION
logger = logging.getLogger(__name__)
@ -403,12 +403,13 @@ def config_set(request: Request, body: AppConfigSetBody):
settings,
)
else:
# Handle nested config updates (e.g., config/classification/custom/{name})
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)
if settings:
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
return JSONResponse(
content=(

View File

@ -31,14 +31,14 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.const import CLIPS_DIR, FACE_DIR
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.classification import (
collect_object_classification_examples,
collect_state_classification_examples,
)
from frigate.util.path import get_event_snapshot
from frigate.util.file import get_event_snapshot
logger = logging.getLogger(__name__)
@ -804,3 +804,46 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
content={"success": True, "message": "Example generation completed"},
status_code=200,
)
@router.delete(
"/classification/{name}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete a classification model",
description="""Deletes a specific classification model and all its associated data.
The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
)
def delete_classification_model(request: Request, name: str):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
# Delete the classification model's data directory in clips
data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
# Delete the classification model's files in model_cache
model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully deleted classification model {name}.",
}
),
status_code=200,
)

View File

@ -2,6 +2,7 @@
import base64
import datetime
import json
import logging
import os
import random
@ -57,8 +58,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions, get_tz_modifiers
logger = logging.getLogger(__name__)
@ -813,7 +814,6 @@ def events_summary(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
tz_name = params.timezone
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
has_clip = params.has_clip
has_snapshot = params.has_snapshot
@ -828,33 +828,91 @@ def events_summary(
if len(clauses) == 0:
clauses.append((True))
groups = (
time_range_query = (
Event.select(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
fn.MIN(Event.start_time).alias("min_time"),
fn.MAX(Event.start_time).alias("max_time"),
)
.where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
Event.zones,
)
.dicts()
.get()
)
return JSONResponse(content=[e for e in groups.dicts()])
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content=[])
dst_periods = get_dst_transitions(tz_name, min_time, max_time)
grouped: dict[tuple, dict] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_groups = (
Event.select(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(
reduce(operator.and_, clauses)
& (Event.camera << allowed_cameras)
& (Event.start_time >= period_start)
& (Event.start_time <= period_end)
)
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + period_offset).cast("int") / (3600 * 24),
Event.zones,
)
.namedtuples()
)
for g in period_groups:
key = (
g.camera,
g.label,
g.sub_label,
json.dumps(g.data, sort_keys=True) if g.data is not None else None,
g.day,
json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
)
if key in grouped:
grouped[key]["count"] += int(g.count or 0)
else:
grouped[key] = {
"camera": g.camera,
"label": g.label,
"sub_label": g.sub_label,
"data": g.data,
"day": g.day,
"zones": g.zones,
"count": int(g.count or 0),
}
return JSONResponse(content=list(grouped.values()))
@router.get(

View File

@ -34,7 +34,7 @@ from frigate.record.export import (
PlaybackSourceEnum,
RecordingExporter,
)
from frigate.util.builtin import is_current_hour
from frigate.util.time import is_current_hour
logger = logging.getLogger(__name__)

View File

@ -44,9 +44,9 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
@ -424,7 +424,6 @@ def all_recordings_summary(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
cameras = params.cameras
if cameras != "all":
@ -432,41 +431,70 @@ def all_recordings_summary(
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
cameras = ",".join(filtered)
camera_list = list(filtered)
else:
cameras = allowed_cameras
camera_list = allowed_cameras
query = (
time_range_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day")
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.where(Recordings.camera << camera_list)
.dicts()
.get()
)
if params.cameras != "all":
query = query.where(Recordings.camera << cameras.split(","))
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
recording_days = query.namedtuples()
days = {day.day: True for day in recording_days}
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=days)
@ -476,61 +504,103 @@ def all_recordings_summary(
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
recording_groups = (
time_range_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera == camera_name)
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
.dicts()
.get()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
.where(Event.camera == camera_name, Event.has_clip)
.group_by((Event.start_time + seconds_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
days = {}
event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day not in days:
days[day] = {"events": events_count, "hours": [hour_data], "day": day}
else:
days[day]["events"] += events_count
days[day]["hours"].append(hour_data)
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values()))

View File

@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
@ -197,7 +197,6 @@ async def review_summary(
user_id = current_user["username"]
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
cameras = params.cameras
@ -329,89 +328,135 @@ async def review_summary(
)
clauses.append(reduce(operator.or_, label_clauses))
day_in_seconds = 60 * 60 * 24
last_month_query = (
# Find the time range of available data
time_range_query = (
ReviewSegment.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
fn.MIN(ReviewSegment.start_time).alias("min_time"),
fn.MAX(ReviewSegment.start_time).alias("max_time"),
)
.where(reduce(operator.and_, clauses) if clauses else True)
.group_by(
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
data = {
"last24Hours": last_24_query,
}
for e in last_month_query.dicts().iterator():
data[e["day"]] = e
# If no data, return early
if min_time is None or max_time is None:
return JSONResponse(content=data)
# Get DST transition periods
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
day_in_seconds = 60 * 60 * 24
# Query each DST period separately with the correct offset
for period_start, period_end, period_offset in dst_periods:
# Calculate hour/minute modifiers for this period
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
# Build clauses including time range for this period
period_clauses = clauses.copy()
period_clauses.append(
(ReviewSegment.start_time >= period_start)
& (ReviewSegment.start_time <= period_end)
)
period_query = (
ReviewSegment.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, period_clauses))
.group_by(
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
# Merge results from this period
for e in period_query.dicts().iterator():
day_key = e["day"]
if day_key in data:
# Merge counts if day already exists (edge case at DST boundary)
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
data[day_key]["total_alert"] += e["total_alert"] or 0
data[day_key]["total_detection"] += e["total_detection"] or 0
else:
data[day_key] = e
return JSONResponse(content=data)

View File

@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple
import cv2
import numpy as np
from Levenshtein import distance, jaro_winkler
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from rapidfuzz.distance import JaroWinkler, Levenshtein
from shapely.geometry import Polygon
from frigate.comms.event_metadata_updater import (
@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin:
for i, plate in enumerate(plates):
merged = False
for j, cluster in enumerate(clusters):
sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
sims = [
JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
]
if len(sims) > 0:
avg_sim = sum(sims) / len(sims)
if avg_sim >= self.cluster_threshold:
@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin:
and current_time - data["last_seen"]
<= self.config.cameras[camera].lpr.expire_time
):
similarity = jaro_winkler(data["plate"], top_plate)
similarity = JaroWinkler.similarity(data["plate"], top_plate)
if similarity >= self.similarity_threshold:
plate_id = existing_id
logger.debug(
@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin:
for label, plates_list in self.lpr_config.known_plates.items()
if any(
re.match(f"^{plate}$", rep_plate)
or distance(plate, rep_plate) <= self.lpr_config.match_distance
or Levenshtein.distance(plate, rep_plate)
<= self.lpr_config.match_distance
for plate in plates_list
)
),

View File

@ -20,8 +20,8 @@ from frigate.genai import GenAIClient
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
from frigate.util.path import get_event_thumbnail_bytes
if TYPE_CHECKING:
from frigate.embeddings import Embeddings

View File

@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event, Trigger
from frigate.util.builtin import cosine_distance
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes
from ..post.api import PostProcessorApi
from ..types import DataProcessorMetrics

View File

@ -466,6 +466,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
now,
self.labelmap[best_id],
score,
max_files=200,
)
if score < self.model_config.threshold:
@ -529,6 +530,7 @@ def write_classification_attempt(
timestamp: float,
label: str,
score: float,
max_files: int = 100,
) -> None:
if "-" in label:
label = label.replace("-", "_")
@ -544,5 +546,5 @@ def write_classification_attempt(
)
# delete oldest classification attempt image if maximum is reached
if len(files) > 100:
if len(files) > max_files:
os.unlink(os.path.join(folder, files[-1]))

View File

@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
camera = obj_data["camera"]
if not self.config.cameras[camera].face_recognition.enabled:
logger.debug(f"Face recognition disabled for camera {camera}, skipping")
return
start = datetime.datetime.now().timestamp()
@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
person_box = obj_data.get("box")
if not person_box:
logger.debug(f"No person box available for {id}")
return
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
try:
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
except Exception:
except Exception as e:
logger.debug(f"Failed to convert face frame color for {id}: {e}")
return
else:
# don't run for object without attributes
@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
# no faces detected in this frame
if not face:
logger.debug(f"No face attributes found for {id}")
return
face_box = face.get("box")
@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
res = self.recognizer.classify(face_frame)
if not res:
logger.debug(f"Face recognizer returned no result for {id}")
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
@ -330,6 +335,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
self.recognizer.clear()
return {"success": True, "message": "Face classifier cleared."}
elif topic == EmbeddingsRequestEnum.recognize_face.value:
img = cv2.imdecode(
np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),

View File

@ -17,6 +17,7 @@ from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
)
from frigate.util.file import FileLock
from frigate.util.model import post_process_yolo
logger = logging.getLogger(__name__)
@ -177,29 +178,6 @@ class MemryXDetector(DetectionApi):
logger.error(f"Failed to initialize MemryX model: {e}")
raise
def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2):
"""
Create an exclusive lock file. Blocks (with polling) until it can acquire,
or raises TimeoutError. Uses only stdlib (os.O_EXCL).
"""
start = time.time()
while True:
try:
fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
os.close(fd)
return
except FileExistsError:
if time.time() - start > timeout:
raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
time.sleep(poll)
def _release_file_lock(self, lock_path: str):
"""Best-effort removal of the lock file."""
try:
os.remove(lock_path)
except FileNotFoundError:
pass
def load_yolo_constants(self):
base = f"{self.cache_dir}/{self.model_folder}"
# constants for yolov9 post-processing
@ -212,9 +190,9 @@ class MemryXDetector(DetectionApi):
os.makedirs(self.cache_dir, exist_ok=True)
lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock")
self._acquire_file_lock(lock_path)
lock = FileLock(lock_path, timeout=60)
try:
with lock:
# ---------- CASE 1: user provided a custom model path ----------
if self.memx_model_path:
if not self.memx_model_path.endswith(".zip"):
@ -338,9 +316,6 @@ class MemryXDetector(DetectionApi):
f"Failed to remove downloaded zip {zip_path}: {e}"
)
finally:
self._release_file_lock(lock_path)
def send_input(self, connection_id, tensor_input: np.ndarray):
"""Pre-process (if needed) and send frame to MemryX input queue"""
if tensor_input is None:

View File

@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Trigger
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
from .onnx.jina_v2_embedding import JinaV2Embedding

View File

@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import get_genai_client
from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.util.builtin import serialize
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.path import get_event_thumbnail_bytes
from .embeddings import Embeddings
@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
self.realtime_processors: list[RealTimeProcessorApi] = []
if self.config.face_recognition.enabled:
logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
self.realtime_processors.append(
FaceRealTimeProcessor(
self.config, self.requestor, self.event_metadata_publisher, metrics
)
)
logger.debug("FaceRealTimeProcessor initialized successfully")
if self.config.classification.bird.enabled:
self.realtime_processors.append(
@ -283,44 +285,65 @@ class EmbeddingMaintainer(threading.Thread):
logger.info("Exiting embeddings maintenance...")
def _check_classification_config_updates(self) -> None:
"""Check for classification config updates and add new processors."""
"""Check for classification config updates and add/remove processors."""
topic, model_config = self.classification_config_subscriber.check_for_update()
if topic and model_config:
if topic:
model_name = topic.split("/")[-1]
self.config.classification.custom[model_name] = model_config
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
if model_config is None:
self.realtime_processors = [
processor
for processor in self.realtime_processors
if not (
isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
)
return
and processor.model_config.name == model_name
)
]
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
logger.info(
f"Successfully removed classification processor for model: {model_name}"
)
else:
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.metrics,
)
self.config.classification.custom[model_name] = model_config
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
)
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
)
return
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
)
else:
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.metrics,
)
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
)
def _process_requests(self) -> None:
"""Process embeddings requests"""
@ -374,7 +397,14 @@ class EmbeddingMaintainer(threading.Thread):
source_type, _, camera, frame_name, data = update
logger.debug(
f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
)
if not camera or source_type != EventTypeEnum.tracked_object:
logger.debug(
f"Skipping update - camera: {camera}, source_type: {source_type}"
)
return
if self.config.semantic_search.enabled:
@ -384,6 +414,9 @@ class EmbeddingMaintainer(threading.Thread):
# no need to process updated objects if no processors are active
if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
logger.debug(
f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
)
return
# Create our own thumbnail based on the bounding box and the frame time
@ -392,6 +425,7 @@ class EmbeddingMaintainer(threading.Thread):
frame_name, camera_config.frame_shape_yuv
)
except FileNotFoundError:
logger.debug(f"Frame {frame_name} not found for camera {camera}")
pass
if yuv_frame is None:
@ -400,7 +434,11 @@ class EmbeddingMaintainer(threading.Thread):
)
return
logger.debug(
f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
)
for processor in self.realtime_processors:
logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
processor.process_frame(data, yuv_frame)
for processor in self.post_processors:

View File

@ -12,7 +12,7 @@ from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
from frigate.util.path import delete_event_snapshot, delete_event_thumbnail
from frigate.util.file import delete_event_snapshot, delete_event_thumbnail
logger = logging.getLogger(__name__)

View File

@ -114,7 +114,7 @@ Your response MUST be a flat JSON object with:
## Objects in Scene
Each line represents a detection state, not necessarily unique individuals. Objects with names in parentheses (e.g., "Name (person)") are verified identities. Objects without names (e.g., "Person") are detected but not identified.
Each line represents a detection state, not necessarily unique individuals. Parentheses indicate object type or category, use only the name/label in your response, not the parentheses.
**CRITICAL: When you see both recognized and unrecognized entries of the same type (e.g., "Joe (person)" and "Person"), visually count how many distinct people/objects you actually see based on appearance and clothing. If you observe only ONE person throughout the sequence, use ONLY the recognized name (e.g., "Joe"). The same person may be recognized in some frames but not others. Only describe both if you visually see MULTIPLE distinct people with clearly different appearances.**

View File

@ -18,6 +18,7 @@ class OpenAIClient(GenAIClient):
"""Generative AI client for Frigate using OpenAI."""
provider: OpenAI
context_size: Optional[int] = None
def _init_provider(self):
"""Initialize the client."""
@ -69,5 +70,33 @@ class OpenAIClient(GenAIClient):
def get_context_size(self) -> int:
"""Get the context window size for OpenAI."""
# OpenAI GPT-4 Vision models have 128K token context window
return 128000
if self.context_size is not None:
return self.context_size
try:
models = self.provider.models.list()
for model in models.data:
if model.id == self.genai_config.model:
if hasattr(model, "max_model_len") and model.max_model_len:
self.context_size = model.max_model_len
logger.debug(
f"Retrieved context size {self.context_size} for model {self.genai_config.model}"
)
return self.context_size
except Exception as e:
logger.debug(
f"Failed to fetch model context size from API: {e}, using default"
)
# Default to 128K for ChatGPT models, 8K for others
model_name = self.genai_config.model.lower()
if "gpt" in model_name:
self.context_size = 128000
else:
self.context_size = 8192
logger.debug(
f"Using default context size {self.context_size} for model {self.genai_config.model}"
)
return self.context_size

View File

@ -9,6 +9,7 @@ from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
import numpy as np
import zmq
from frigate.comms.object_detector_signaler import (
ObjectDetectorPublisher,
@ -377,6 +378,15 @@ class RemoteObjectDetector:
if self.stop_event.is_set():
return detections
# Drain any stale detection results from the ZMQ buffer before making a new request
# This prevents reading detection results from a previous request
# NOTE: This should never happen, but can in some rare cases
while True:
try:
self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
except zmq.Again:
break
# copy input to shared memory
self.np_shm[:] = tensor_input[:]
self.detection_queue.put(self.name)

View File

@ -14,7 +14,8 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
from frigate.util.builtin import clear_and_unlink
from frigate.util.time import get_tomorrow_at_time
logger = logging.getLogger(__name__)

View File

@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Export, Previews, Recordings
from frigate.util.builtin import is_current_hour
from frigate.util.time import is_current_hour
logger = logging.getLogger(__name__)

View File

@ -15,12 +15,9 @@ from collections.abc import Mapping
from multiprocessing.sharedctypes import Synchronized
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from zoneinfo import ZoneInfoNotFoundError
import numpy as np
import pytz
from ruamel.yaml import YAML
from tzlocal import get_localzone
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
@ -157,17 +154,6 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
seconds_offset = (
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
)
hours_offset = int(seconds_offset / 60 / 60)
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
hour_modifier = f"{hours_offset} hour"
minute_modifier = f"{minutes_offset} minute"
return hour_modifier, minute_modifier, seconds_offset
def to_relative_box(
width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int | float, int | float, int | float, int | float]:
@ -298,34 +284,6 @@ def find_by_key(dictionary, target_key):
return None
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
"""Returns the datetime of the following day at 2am."""
try:
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
except ZoneInfoNotFoundError:
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
days=1
)
logger.warning(
"Using utc for maintenance due to missing or incorrect timezone set"
)
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
datetime.timezone.utc
)
def is_current_hour(timestamp: int) -> bool:
"""Returns if timestamp is in the current UTC hour."""
start_of_next_hour = (
datetime.datetime.now(datetime.timezone.utc).replace(
minute=0, second=0, microsecond=0
)
+ datetime.timedelta(hours=1)
).timestamp()
return timestamp < start_of_next_hour
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
"""clear file then unlink to avoid space retained by file descriptors."""
if not missing_ok and not file.exists():

View File

@ -20,8 +20,8 @@ from frigate.const import (
from frigate.log import redirect_output_to_logger
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.process import FrigateProcess
BATCH_SIZE = 16

View File

@ -384,10 +384,10 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
new_object_config["genai"] = {}
for key in global_genai.keys():
if key not in ["enabled", "model", "provider", "base_url", "api_key"]:
new_object_config["genai"][key] = global_genai[key]
else:
if key in ["model", "provider", "base_url", "api_key"]:
new_genai_config[key] = global_genai[key]
else:
new_object_config["genai"][key] = global_genai[key]
config["genai"] = new_genai_config

View File

@ -1,7 +1,6 @@
import logging
import os
import threading
import time
from pathlib import Path
from typing import Callable, List
@ -10,40 +9,11 @@ import requests
from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.file import FileLock
logger = logging.getLogger(__name__)
class FileLock:
def __init__(self, path):
self.path = path
self.lock_file = f"{path}.lock"
# we have not acquired the lock yet so it should not exist
if os.path.exists(self.lock_file):
try:
os.remove(self.lock_file)
except Exception:
pass
def acquire(self):
parent_dir = os.path.dirname(self.lock_file)
os.makedirs(parent_dir, exist_ok=True)
while True:
try:
with open(self.lock_file, "x"):
return
except FileExistsError:
time.sleep(0.1)
def release(self):
try:
os.remove(self.lock_file)
except FileNotFoundError:
pass
class ModelDownloader:
def __init__(
self,
@ -81,15 +51,13 @@ class ModelDownloader:
def _download_models(self):
for file_name in self.file_names:
path = os.path.join(self.download_path, file_name)
lock = FileLock(path)
lock_path = f"{path}.lock"
lock = FileLock(lock_path, cleanup_stale_on_init=True)
if not os.path.exists(path):
lock.acquire()
try:
with lock:
if not os.path.exists(path):
self.download_func(path)
finally:
lock.release()
self.requestor.send_data(
UPDATE_MODEL_STATE,

276
frigate/util/file.py Normal file
View File

@ -0,0 +1,276 @@
"""Path and file utilities."""
import base64
import fcntl
import logging
import os
import time
from pathlib import Path
from typing import Optional
import cv2
from numpy import ndarray
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event
logger = logging.getLogger(__name__)
def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    """Return the raw thumbnail bytes for an event.

    Prefers the base64-encoded thumbnail stored inline on the event row;
    falls back to reading the on-disk webp thumbnail. Returns None when
    neither source is available (best-effort lookup).
    """
    if event.thumbnail:
        # Thumbnail is stored inline on the event record, base64-encoded.
        return base64.b64decode(event.thumbnail)

    thumb_path = os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")
    try:
        with open(thumb_path, "rb") as f:
            return f.read()
    except Exception:
        # Missing or unreadable file — callers treat None as "no thumbnail".
        return None
def get_event_snapshot(event: Event) -> ndarray:
    """Load the full-resolution snapshot jpg for an event from the clips dir."""
    snapshot_base = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}")
    return cv2.imread(f"{snapshot_base}.jpg")
### Deletion
def delete_event_images(event: Event) -> bool:
    """Delete both the snapshot and thumbnail images for an event.

    Both deletions are always attempted. The original expression
    ``delete_event_snapshot(event) and delete_event_thumbnail(event)``
    short-circuited: a failed snapshot deletion skipped the thumbnail
    deletion entirely, leaving the thumbnail file orphaned on disk.

    Returns:
        True only if both deletions succeeded.
    """
    snapshot_deleted = delete_event_snapshot(event)
    thumbnail_deleted = delete_event_thumbnail(event)
    return snapshot_deleted and thumbnail_deleted
def delete_event_snapshot(event: Event) -> bool:
    """Remove an event's snapshot files from the clips directory.

    Deletes the main jpg, the clean webp, and the legacy clean png.
    Missing files are ignored; returns False only on an OS-level error.
    """
    base = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}")
    try:
        # ".jpg" is the snapshot itself; "-clean.webp" is the clean copy;
        # "-clean.png" is the legacy clean format kept for backward compatibility.
        for suffix in (".jpg", "-clean.webp", "-clean.png"):
            Path(f"{base}{suffix}").unlink(missing_ok=True)
        return True
    except OSError:
        return False
def delete_event_thumbnail(event: Event) -> bool:
    """Remove an event's on-disk thumbnail, if one exists.

    Events that carry an inline (database-stored) thumbnail have no file
    to remove. Always returns True.
    """
    if not event.thumbnail:
        thumb_file = Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"))
        thumb_file.unlink(missing_ok=True)
    return True
### File Locking
class FileLock:
    """
    A file-based lock for coordinating access to resources across processes.

    Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts,
    stale lock detection, and can be used as a context manager.

    NOTE(review): staleness is judged purely by the lock file's mtime, not by
    whether a live process still holds an flock on it — confirm that holders
    of long-running critical sections touch the file or use a large enough
    stale_timeout.

    Example:
        ```python
        # Using as a context manager (recommended)
        with FileLock("/path/to/resource.lock", timeout=60):
            # Critical section
            do_something()

        # Manual acquisition and release
        lock = FileLock("/path/to/resource.lock")
        if lock.acquire(timeout=60):
            try:
                do_something()
            finally:
                lock.release()
        ```

    Attributes:
        lock_path: Path to the lock file
        timeout: Maximum time to wait for lock acquisition (seconds)
        poll_interval: Time to wait between lock acquisition attempts (seconds)
        stale_timeout: Time after which a lock is considered stale (seconds)
    """

    def __init__(
        self,
        lock_path: str | Path,
        timeout: int = 300,
        poll_interval: float = 1.0,
        stale_timeout: int = 600,
        cleanup_stale_on_init: bool = False,
    ):
        """
        Initialize a FileLock.

        Args:
            lock_path: Path to the lock file
            timeout: Maximum time to wait for lock acquisition in seconds (default: 300)
            poll_interval: Time to wait between lock attempts in seconds (default: 1.0)
            stale_timeout: Time after which a lock is considered stale in seconds (default: 600)
            cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False)
        """
        self.lock_path = Path(lock_path)
        self.timeout = timeout
        self.poll_interval = poll_interval
        self.stale_timeout = stale_timeout
        # File descriptor holding the flock; None whenever the lock is not held.
        self._fd: Optional[int] = None
        self._acquired = False

        if cleanup_stale_on_init:
            self._cleanup_stale_lock()

    def _cleanup_stale_lock(self) -> bool:
        """
        Clean up a stale lock file if it exists and is old.

        Returns:
            True if lock was cleaned up, False otherwise
        """
        try:
            if self.lock_path.exists():
                # Check if lock file is older than stale_timeout
                lock_age = time.time() - self.lock_path.stat().st_mtime
                if lock_age > self.stale_timeout:
                    logger.warning(
                        f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)"
                    )
                    self.lock_path.unlink()
                    return True
        except Exception as e:
            # Best-effort: failure to clean up only delays acquisition.
            logger.error(f"Error cleaning up stale lock: {e}")
        return False

    def is_stale(self) -> bool:
        """
        Check if the lock file is stale (older than stale_timeout).

        Returns:
            True if lock is stale, False otherwise
        """
        try:
            if self.lock_path.exists():
                lock_age = time.time() - self.lock_path.stat().st_mtime
                return lock_age > self.stale_timeout
        except Exception:
            # stat() can fail if the file vanishes between exists() and stat();
            # treat that as "not stale".
            pass
        return False

    def acquire(self, timeout: Optional[int] = None) -> bool:
        """
        Acquire the file lock using fcntl.flock().

        Args:
            timeout: Maximum time to wait for lock in seconds (uses instance timeout if None)

        Returns:
            True if lock acquired, False if timeout or error
        """
        if self._acquired:
            # Re-acquiring an already-held lock is a no-op (not reentrant counting).
            logger.warning(f"Lock already acquired: {self.lock_path}")
            return True

        if timeout is None:
            timeout = self.timeout

        # Ensure parent directory exists
        self.lock_path.parent.mkdir(parents=True, exist_ok=True)

        # Clean up stale lock before attempting to acquire
        self._cleanup_stale_lock()

        try:
            self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR)

            start_time = time.time()
            while time.time() - start_time < timeout:
                try:
                    # Non-blocking attempt; we implement the wait ourselves so
                    # we can honor the timeout.
                    fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    self._acquired = True
                    logger.debug(f"Acquired lock: {self.lock_path}")
                    return True
                except (OSError, IOError):
                    # Lock is held by another process
                    if time.time() - start_time >= timeout:
                        logger.warning(f"Timeout waiting for lock: {self.lock_path}")
                        os.close(self._fd)
                        self._fd = None
                        return False
                    time.sleep(self.poll_interval)

            # Timeout reached
            if self._fd is not None:
                os.close(self._fd)
                self._fd = None
            return False

        except Exception as e:
            logger.error(f"Error acquiring lock: {e}")
            if self._fd is not None:
                try:
                    os.close(self._fd)
                except Exception:
                    pass
                self._fd = None
            return False

    def release(self) -> None:
        """
        Release the file lock.

        This closes the file descriptor and removes the lock file.

        NOTE(review): unlinking the lock file after dropping the flock can race
        with another process that has already os.open()'d the same path — it
        would then hold an flock on an unlinked inode while a third process
        creates a fresh file. Confirm callers tolerate this (the stale-lock
        cleanup path suggests best-effort semantics are intended).
        """
        if not self._acquired:
            return

        try:
            # Close file descriptor and release fcntl lock
            if self._fd is not None:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_UN)
                    os.close(self._fd)
                except Exception as e:
                    logger.warning(f"Error closing lock file descriptor: {e}")
                finally:
                    self._fd = None

            # Remove lock file
            if self.lock_path.exists():
                self.lock_path.unlink()
                logger.debug(f"Released lock: {self.lock_path}")
        except FileNotFoundError:
            # Lock file already removed, that's fine
            pass
        except Exception as e:
            logger.error(f"Error releasing lock: {e}")
        finally:
            self._acquired = False

    def __enter__(self):
        """Context manager entry - acquire the lock."""
        if not self.acquire():
            raise TimeoutError(f"Failed to acquire lock: {self.lock_path}")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - release the lock (exceptions propagate)."""
        self.release()
        return False

    def __del__(self):
        """Destructor - ensure lock is released.

        Best-effort only: during interpreter shutdown, module globals used by
        release() (os, fcntl, logger) may already be torn down.
        """
        if self._acquired:
            self.release()

View File

@ -1,62 +0,0 @@
"""Path utilities."""
import base64
import os
from pathlib import Path
import cv2
from numpy import ndarray
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event
def get_event_thumbnail_bytes(event: Event) -> bytes | None:
if event.thumbnail:
return base64.b64decode(event.thumbnail)
else:
try:
with open(
os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
) as f:
return f.read()
except Exception:
return None
def get_event_snapshot(event: Event) -> ndarray:
media_name = f"{event.camera}-{event.id}"
return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
### Deletion
def delete_event_images(event: Event) -> bool:
return delete_event_snapshot(event) and delete_event_thumbnail(event)
def delete_event_snapshot(event: Event) -> bool:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
try:
media_path.unlink(missing_ok=True)
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
media_path.unlink(missing_ok=True)
# also delete clean.png (legacy) for backward compatibility
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
media_path.unlink(missing_ok=True)
return True
except OSError:
return False
def delete_event_thumbnail(event: Event) -> bool:
if event.thumbnail:
return True
else:
Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
missing_ok=True
)
return True

View File

@ -1,6 +1,5 @@
"""RKNN model conversion utility for Frigate."""
import fcntl
import logging
import os
import subprocess
@ -9,6 +8,8 @@ import time
from pathlib import Path
from typing import Optional
from frigate.util.file import FileLock
logger = logging.getLogger(__name__)
MODEL_TYPE_CONFIGS = {
@ -245,112 +246,6 @@ def convert_onnx_to_rknn(
logger.warning(f"Failed to remove temporary ONNX file: {e}")
def cleanup_stale_lock(lock_file_path: Path) -> bool:
"""
Clean up a stale lock file if it exists and is old.
Args:
lock_file_path: Path to the lock file
Returns:
True if lock was cleaned up, False otherwise
"""
try:
if lock_file_path.exists():
# Check if lock file is older than 10 minutes (stale)
lock_age = time.time() - lock_file_path.stat().st_mtime
if lock_age > 600: # 10 minutes
logger.warning(
f"Removing stale lock file: {lock_file_path} (age: {lock_age:.1f}s)"
)
lock_file_path.unlink()
return True
except Exception as e:
logger.error(f"Error cleaning up stale lock: {e}")
return False
def acquire_conversion_lock(lock_file_path: Path, timeout: int = 300) -> bool:
"""
Acquire a file-based lock for model conversion.
Args:
lock_file_path: Path to the lock file
timeout: Maximum time to wait for lock in seconds
Returns:
True if lock acquired, False if timeout or error
"""
try:
lock_file_path.parent.mkdir(parents=True, exist_ok=True)
cleanup_stale_lock(lock_file_path)
lock_fd = os.open(lock_file_path, os.O_CREAT | os.O_RDWR)
# Try to acquire exclusive lock
start_time = time.time()
while time.time() - start_time < timeout:
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Lock acquired successfully
logger.debug(f"Acquired conversion lock: {lock_file_path}")
return True
except (OSError, IOError):
# Lock is held by another process, wait and retry
if time.time() - start_time >= timeout:
logger.warning(
f"Timeout waiting for conversion lock: {lock_file_path}"
)
os.close(lock_fd)
return False
logger.debug("Waiting for conversion lock to be released...")
time.sleep(1)
os.close(lock_fd)
return False
except Exception as e:
logger.error(f"Error acquiring conversion lock: {e}")
return False
def release_conversion_lock(lock_file_path: Path) -> None:
"""
Release the conversion lock.
Args:
lock_file_path: Path to the lock file
"""
try:
if lock_file_path.exists():
lock_file_path.unlink()
logger.debug(f"Released conversion lock: {lock_file_path}")
except Exception as e:
logger.error(f"Error releasing conversion lock: {e}")
def is_lock_stale(lock_file_path: Path, max_age: int = 600) -> bool:
"""
Check if a lock file is stale (older than max_age seconds).
Args:
lock_file_path: Path to the lock file
max_age: Maximum age in seconds before considering lock stale
Returns:
True if lock is stale, False otherwise
"""
try:
if lock_file_path.exists():
lock_age = time.time() - lock_file_path.stat().st_mtime
return lock_age > max_age
except Exception:
pass
return False
def wait_for_conversion_completion(
model_type: str, rknn_path: Path, lock_file_path: Path, timeout: int = 300
) -> bool:
@ -358,6 +253,7 @@ def wait_for_conversion_completion(
Wait for another process to complete the conversion.
Args:
model_type: Type of model being converted
rknn_path: Path to the expected RKNN model
lock_file_path: Path to the lock file to monitor
timeout: Maximum time to wait in seconds
@ -366,6 +262,8 @@ def wait_for_conversion_completion(
True if RKNN model appears, False if timeout
"""
start_time = time.time()
lock = FileLock(lock_file_path, stale_timeout=600)
while time.time() - start_time < timeout:
# Check if RKNN model appeared
if rknn_path.exists():
@ -385,11 +283,14 @@ def wait_for_conversion_completion(
return False
# Check if lock is stale
if is_lock_stale(lock_file_path):
if lock.is_stale():
logger.warning("Lock file is stale, attempting to clean up and retry...")
cleanup_stale_lock(lock_file_path)
lock._cleanup_stale_lock()
# Try to acquire lock again
if acquire_conversion_lock(lock_file_path, timeout=60):
retry_lock = FileLock(
lock_file_path, timeout=60, cleanup_stale_on_init=True
)
if retry_lock.acquire():
try:
# Check if RKNN file appeared while waiting
if rknn_path.exists():
@ -415,7 +316,7 @@ def wait_for_conversion_completion(
return False
finally:
release_conversion_lock(lock_file_path)
retry_lock.release()
logger.debug("Waiting for RKNN model to appear...")
time.sleep(1)
@ -452,8 +353,9 @@ def auto_convert_model(
return str(rknn_path)
lock_file_path = base_path.parent / f"{base_name}.conversion.lock"
lock = FileLock(lock_file_path, timeout=300, cleanup_stale_on_init=True)
if acquire_conversion_lock(lock_file_path):
if lock.acquire():
try:
if rknn_path.exists():
logger.info(
@ -476,7 +378,7 @@ def auto_convert_model(
return None
finally:
release_conversion_lock(lock_file_path)
lock.release()
else:
logger.info(
f"Another process is converting {model_path}, waiting for completion..."

100
frigate/util/time.py Normal file
View File

@ -0,0 +1,100 @@
"""Time utilities."""
import datetime
import logging
from typing import Tuple
from zoneinfo import ZoneInfoNotFoundError
import pytz
from tzlocal import get_localzone
logger = logging.getLogger(__name__)
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    """Build hour/minute offset modifier strings for a timezone.

    Args:
        tz_name: IANA timezone name (e.g. 'America/New_York').

    Returns:
        Tuple of (hour modifier, minute modifier, total offset in seconds),
        where the modifiers are strings like "-5 hour" / "-30 minute"
        suitable for SQLite datetime() modifiers.
    """
    offset_seconds = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    # Split the total offset into whole hours plus the leftover minutes.
    whole_hours = int(offset_seconds / 60 / 60)
    leftover_minutes = int(offset_seconds / 60 - whole_hours * 60)
    return f"{whole_hours} hour", f"{leftover_minutes} minute", offset_seconds
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Return tomorrow's datetime at the given local hour, converted to UTC.

    Args:
        hour: Hour of the day (0-23) in the local timezone.

    Returns:
        A timezone-aware UTC datetime for tomorrow at ``hour``:00:00 local
        time. Falls back to UTC when the local timezone cannot be determined.
    """
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    # Also zero out microseconds so the result is an exact hour boundary;
    # the original replace() kept the current microsecond value.
    return tomorrow.replace(hour=hour, minute=0, second=0, microsecond=0).astimezone(
        datetime.timezone.utc
    )
def is_current_hour(timestamp: int) -> bool:
    """Returns if timestamp is in the current UTC hour.

    Note: only the upper bound is checked — any timestamp earlier than the
    start of the next UTC hour (including timestamps in past hours) returns
    True.
    """
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    top_of_hour = now_utc.replace(minute=0, second=0, microsecond=0)
    next_hour_start = (top_of_hour + datetime.timedelta(hours=1)).timestamp()
    return timestamp < next_hour_start
def get_dst_transitions(
    tz_name: str, start_time: float, end_time: float
) -> list[tuple[float, float, float]]:
    """
    Find DST transition points and return time periods with consistent offsets.

    Args:
        tz_name: Timezone name (e.g., 'America/New_York')
        start_time: Start timestamp (UTC)
        end_time: End timestamp (UTC)

    Returns:
        List of (period_start, period_end, seconds_offset) tuples representing
        continuous periods with the same UTC offset. Boundaries are
        approximate: offsets are sampled once per day, so a reported
        transition point may lag the actual DST change by up to one day.
    """
    try:
        tz = pytz.timezone(tz_name)
    except pytz.UnknownTimeZoneError:
        # If timezone is invalid, return single period with no offset
        return [(start_time, end_time, 0)]

    def _utc_offset_at(ts: float) -> float:
        # Offset (seconds) the zone applies at the given UTC timestamp.
        # fromtimestamp(..., tz=...) replaces the deprecated
        # utcfromtimestamp().replace(tzinfo=...) pattern with the same result.
        local_dt = datetime.datetime.fromtimestamp(ts, tz=pytz.UTC).astimezone(tz)
        return local_dt.utcoffset().total_seconds()

    periods: list[tuple[float, float, float]] = []
    current = start_time
    prev_offset = _utc_offset_at(current)
    period_start = start_time

    # Check each day for offset changes
    while current <= end_time:
        current_offset = _utc_offset_at(current)
        if current_offset != prev_offset:
            # Found a transition - close previous period
            periods.append((period_start, current, prev_offset))
            period_start = current
            prev_offset = current_offset
        current += 86400  # Check daily

    # Add final period
    periods.append((period_start, end_time, prev_offset))
    return periods

View File

@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.track.tracked_object import TrackedObjectAttribute
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import (
FrameManager,
SharedMemoryFrameManager,
@ -53,6 +53,7 @@ from frigate.util.object import (
reduce_detections,
)
from frigate.util.process import FrigateProcess
from frigate.util.time import get_tomorrow_at_time
logger = logging.getLogger(__name__)

View File

@ -100,7 +100,8 @@
},
"list": {
"two": "{{0}} and {{1}}",
"many": "{{items}}, and {{last}}"
"many": "{{items}}, and {{last}}",
"separatorWithSpace": ", "
},
"field": {
"optional": "Optional",

View File

@ -1,31 +1,53 @@
{
"documentTitle": "Classification Models",
"details": {
"scoreInfo": "Score represents the average classification confidence across all detections of this object."
},
"button": {
"deleteClassificationAttempts": "Delete Classification Images",
"renameCategory": "Rename Class",
"deleteCategory": "Delete Class",
"deleteImages": "Delete Images",
"trainModel": "Train Model"
"trainModel": "Train Model",
"addClassification": "Add Classification",
"deleteModels": "Delete Models",
"editModel": "Edit Model"
},
"toast": {
"success": {
"deletedCategory": "Deleted Class",
"deletedImage": "Deleted Images",
"deletedModel_one": "Successfully deleted {{count}} model",
"deletedModel_other": "Successfully deleted {{count}} models",
"categorizedImage": "Successfully Classified Image",
"trainedModel": "Successfully trained model.",
"trainingModel": "Successfully started model training."
"trainingModel": "Successfully started model training.",
"updatedModel": "Successfully updated model configuration"
},
"error": {
"deleteImageFailed": "Failed to delete: {{errorMessage}}",
"deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
"deleteModelFailed": "Failed to delete model: {{errorMessage}}",
"categorizeFailed": "Failed to categorize image: {{errorMessage}}",
"trainingFailed": "Failed to start model training: {{errorMessage}}"
"trainingFailed": "Failed to start model training: {{errorMessage}}",
"updateModelFailed": "Failed to update model: {{errorMessage}}"
}
},
"deleteCategory": {
"title": "Delete Class",
"desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
},
"deleteModel": {
"title": "Delete Classification Model",
"single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.",
"desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone."
},
"edit": {
"title": "Edit Classification Model",
"descriptionState": "Edit the classes for this state classification model. Changes will require retraining the model.",
"descriptionObject": "Edit the object type and classification type for this object classification model.",
"stateClassesInfo": "Note: Changing state classes requires retraining the model with the updated classes."
},
"deleteDatasetImages": {
"title": "Delete Dataset Images",
"desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
@ -52,6 +74,10 @@
},
"categorizeImageAs": "Classify Image As:",
"categorizeImage": "Classify Image",
"menu": {
"objects": "Objects",
"states": "States"
},
"noModels": {
"object": {
"title": "No Object Classification Models",
@ -86,6 +112,7 @@
"classificationSubLabel": "Sub Label",
"classificationAttribute": "Attribute",
"classes": "Classes",
"states": "States",
"classesTip": "Learn about classes",
"classesStateDesc": "Define the different states your camera area can be in. For example: 'open' and 'closed' for a garage door.",
"classesObjectDesc": "Define the different categories to classify detected objects into. For example: 'delivery_person', 'resident', 'stranger' for person classification.",

View File

@ -33,6 +33,7 @@
"type": {
"details": "details",
"snapshot": "snapshot",
"thumbnail": "thumbnail",
"video": "video",
"object_lifecycle": "object lifecycle"
},
@ -41,7 +42,7 @@
"noImageFound": "No image found for this timestamp.",
"createObjectMask": "Create Object Mask",
"adjustAnnotationSettings": "Adjust annotation settings",
"scrollViewTips": "Scroll to view the significant moments of this object's lifecycle.",
"scrollViewTips": "Click to view the significant moments of this object's lifecycle.",
"autoTrackingTips": "Bounding box positions will be inaccurate for autotracking cameras.",
"count": "{{first}} of {{second}}",
"trackedPoint": "Tracked Point",

View File

@ -6,7 +6,8 @@
},
"details": {
"timestamp": "Timestamp",
"unknown": "Unknown"
"unknown": "Unknown",
"scoreInfo": "Score is a weighted average of all face scores, weighted by the size of the face in each image."
},
"documentTitle": "Face Library - Frigate",
"uploadFaceImage": {

View File

@ -271,6 +271,8 @@
"disconnectStream": "Disconnect",
"estimatedBandwidth": "Estimated Bandwidth",
"roles": "Roles",
"ffmpegModule": "Use stream compatibility mode",
"ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
"none": "None",
"error": "Error",
"streamValidated": "Stream {{number}} validated successfully",

View File

@ -7,11 +7,12 @@ import {
} from "@/types/classification";
import { Event } from "@/types/event";
import { forwardRef, useMemo, useRef, useState } from "react";
import { isDesktop, isMobile } from "react-device-detect";
import { isDesktop, isMobile, isMobileOnly } from "react-device-detect";
import { useTranslation } from "react-i18next";
import TimeAgo from "../dynamic/TimeAgo";
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
import { LuSearch } from "react-icons/lu";
import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
import { LuSearch, LuInfo } from "react-icons/lu";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { useNavigate } from "react-router-dom";
import { HiSquare2Stack } from "react-icons/hi2";
@ -181,6 +182,7 @@ type GroupedClassificationCardProps = {
selectedItems: string[];
i18nLibrary: string;
objectType: string;
noClassificationLabel?: string;
onClick: (data: ClassificationItemData | undefined) => void;
children?: (data: ClassificationItemData) => React.ReactNode;
};
@ -190,6 +192,7 @@ export function GroupedClassificationCard({
threshold,
selectedItems,
i18nLibrary,
noClassificationLabel = "details.none",
onClick,
children,
}: GroupedClassificationCardProps) {
@ -222,10 +225,14 @@ export function GroupedClassificationCard({
const bestTyped: ClassificationItemData = best;
return {
...bestTyped,
name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name,
name: event
? event.sub_label && event.sub_label !== "none"
? event.sub_label
: t(noClassificationLabel)
: bestTyped.name,
score: event?.data?.sub_label_score || bestTyped.score,
};
}, [group, event, t]);
}, [group, event, noClassificationLabel, t]);
const bestScoreStatus = useMemo(() => {
if (!bestItem?.score || !threshold) {
@ -257,8 +264,8 @@ export function GroupedClassificationCard({
const Overlay = isDesktop ? Dialog : MobilePage;
const Trigger = isDesktop ? DialogTrigger : MobilePageTrigger;
const Header = isDesktop ? DialogHeader : MobilePageHeader;
const Content = isDesktop ? DialogContent : MobilePageContent;
const Header = isDesktop ? DialogHeader : MobilePageHeader;
const ContentTitle = isDesktop ? DialogTitle : MobilePageTitle;
const ContentDescription = isDesktop
? DialogDescription
@ -291,9 +298,9 @@ export function GroupedClassificationCard({
<Trigger asChild></Trigger>
<Content
className={cn(
"",
"scrollbar-container",
isDesktop && "min-w-[50%] max-w-[65%]",
isMobile && "flex flex-col",
isMobile && "overflow-y-auto",
)}
onOpenAutoFocus={(e) => e.preventDefault()}
>
@ -301,26 +308,45 @@ export function GroupedClassificationCard({
<Header
className={cn(
"mx-2 flex flex-row items-center gap-4",
isMobile && "flex-shrink-0",
isMobileOnly && "top-0 mx-4",
)}
>
<div>
<ContentTitle
className={cn(
"flex items-center gap-2 font-normal capitalize",
isMobile && "px-2",
)}
>
{event?.sub_label ? event.sub_label : t("details.unknown")}
{event?.sub_label && (
<div
className={cn(
"",
bestScoreStatus == "match" && "text-success",
bestScoreStatus == "potential" && "text-orange-400",
bestScoreStatus == "unknown" && "text-danger",
)}
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
<div
className={cn(
"",
isMobile && "flex flex-col items-center justify-center",
)}
>
<ContentTitle className="flex items-center gap-2 font-normal capitalize">
{event?.sub_label && event.sub_label !== "none"
? event.sub_label
: t(noClassificationLabel)}
{event?.sub_label && event.sub_label !== "none" && (
<div className="flex items-center gap-1">
<div
className={cn(
"",
bestScoreStatus == "match" && "text-success",
bestScoreStatus == "potential" && "text-orange-400",
bestScoreStatus == "unknown" && "text-danger",
)}
>{`${Math.round((event.data.sub_label_score || 0) * 100)}%`}</div>
<Popover>
<PopoverTrigger asChild>
<button
className="focus:outline-none"
aria-label={t("details.scoreInfo", {
ns: i18nLibrary,
})}
>
<LuInfo className="size-3" />
</button>
</PopoverTrigger>
<PopoverContent className="w-80 text-sm">
{t("details.scoreInfo", { ns: i18nLibrary })}
</PopoverContent>
</Popover>
</div>
)}
</ContentTitle>
<ContentDescription className={cn("", isMobile && "px-2")}>
@ -364,7 +390,7 @@ export function GroupedClassificationCard({
className={cn(
"grid w-full auto-rows-min grid-cols-2 gap-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-6 2xl:grid-cols-8",
isDesktop && "p-2",
isMobile && "scrollbar-container flex-1 overflow-y-auto",
isMobile && "px-4 pb-4",
)}
>
{group.map((data: ClassificationItemData) => (

View File

@ -37,6 +37,8 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { Button, buttonVariants } from "../ui/button";
import { Trans, useTranslation } from "react-i18next";
import { cn } from "@/lib/utils";
import { LuCircle } from "react-icons/lu";
import { MdAutoAwesome } from "react-icons/md";
type ReviewCardProps = {
event: ReviewSegment;
@ -142,7 +144,7 @@ export default function ReviewCard({
className={cn(
"size-full rounded-lg",
activeReviewItem?.id == event.id &&
"outline outline-[3px] outline-offset-1 outline-selected",
"outline outline-[3px] -outline-offset-[2.8px] outline-selected duration-200",
imgLoaded ? "visible" : "invisible",
)}
src={`${baseUrl}${event.thumb_path.replace("/media/frigate/", "")}`}
@ -163,21 +165,33 @@ export default function ReviewCard({
<div className="flex items-center justify-between">
<Tooltip>
<TooltipTrigger asChild>
<div className="flex items-center justify-evenly gap-1">
<>
{event.data.objects.map((object) => {
return getIconForLabel(
object,
"size-3 text-primary dark:text-white",
);
})}
{event.data.audio.map((audio) => {
return getIconForLabel(
audio,
"size-3 text-primary dark:text-white",
);
})}
</>
<div className="flex items-center gap-2">
<LuCircle
className={cn(
"size-2",
event.severity == "alert"
? "fill-severity_alert text-severity_alert"
: "fill-severity_detection text-severity_detection",
)}
/>
<div className="flex items-center gap-1">
{event.data.objects.map((object, idx) => (
<div
key={`${object}-${idx}`}
className="rounded-full bg-muted-foreground p-1"
>
{getIconForLabel(object, "size-3 text-white")}
</div>
))}
{event.data.audio.map((audio, idx) => (
<div
key={`${audio}-${idx}`}
className="rounded-full bg-muted-foreground p-1"
>
{getIconForLabel(audio, "size-3 text-white")}
</div>
))}
</div>
<div className="font-extra-light text-xs">{formattedDate}</div>
</div>
</TooltipTrigger>
@ -204,6 +218,14 @@ export default function ReviewCard({
dense
/>
</div>
{event.data.metadata?.title && (
<div className="flex items-center gap-1.5 rounded bg-secondary/50">
<MdAutoAwesome className="size-3 shrink-0 text-primary" />
<span className="truncate text-xs text-primary">
{event.data.metadata.title}
</span>
</div>
)}
</div>
);

View File

@ -0,0 +1,477 @@
import { Button } from "@/components/ui/button";
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import {
Form,
FormControl,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import {
CustomClassificationModelConfig,
FrigateConfig,
} from "@/types/frigateConfig";
import { getTranslatedLabel } from "@/utils/i18n";
import { zodResolver } from "@hookform/resolvers/zod";
import axios from "axios";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useForm } from "react-hook-form";
import { useTranslation } from "react-i18next";
import { LuPlus, LuX } from "react-icons/lu";
import { toast } from "sonner";
import useSWR from "swr";
import { z } from "zod";
// Props for the classification model edit dialog.
type ClassificationModelEditDialogProps = {
  // Whether the dialog is currently shown.
  open: boolean;
  // The classification model being edited.
  model: CustomClassificationModelConfig;
  // Invoked when the dialog is dismissed (cancel / backdrop close).
  onClose: () => void;
  // Invoked after a successful save.
  onSuccess: () => void;
};

// How an object classification result is applied to a tracked object.
type ObjectClassificationType = "sub_label" | "attribute";

// Form shape for object classification models.
type ObjectFormData = {
  objectLabel: string;
  objectType: ObjectClassificationType;
};

// Form shape for state classification models.
type StateFormData = {
  classes: string[];
};
/**
 * Dialog for editing an existing custom classification model.
 *
 * Renders one of two forms depending on the model's config:
 * - Object models: pick the tracked object label and whether classification
 *   results are applied as a sub label or an attribute. Saving PUTs the
 *   updated model config to `/config/set`.
 * - State models: edit the list of state classes (populated from the model's
 *   dataset). Saving currently only shows an informational toast — class
 *   renames go through the dataset API and are not yet wired up here.
 */
export default function ClassificationModelEditDialog({
  open,
  model,
  onClose,
  onSuccess,
}: ClassificationModelEditDialogProps) {
  const { t } = useTranslation(["views/classificationModel"]);
  const { data: config } = useSWR<FrigateConfig>("config");
  const [isSaving, setIsSaving] = useState(false);

  // Model type is determined by which config section is present.
  const isStateModel = model.state_config !== undefined;
  const isObjectModel = model.object_config !== undefined;

  // Tracked object labels from all enabled cameras, excluding attribute
  // labels, deduplicated and sorted — used for the object label <Select>.
  const objectLabels = useMemo(() => {
    if (!config) return [];

    const labels = new Set<string>();

    Object.values(config.cameras).forEach((cameraConfig) => {
      if (!cameraConfig.enabled || !cameraConfig.enabled_in_config) {
        return;
      }

      cameraConfig.objects.track.forEach((label) => {
        if (!config.model.all_attributes.includes(label)) {
          labels.add(label);
        }
      });
    });

    return [...labels].sort();
  }, [config]);

  // Define form schema based on model type
  const formSchema = useMemo(() => {
    if (isObjectModel) {
      return z.object({
        objectLabel: z
          .string()
          .min(1, t("wizard.step1.errors.objectLabelRequired")),
        objectType: z.enum(["sub_label", "attribute"]),
      });
    } else {
      // State model: require at least two non-empty, case-insensitively
      // unique class names.
      return z.object({
        classes: z
          .array(z.string())
          .min(1, t("wizard.step1.errors.classRequired"))
          .refine(
            (classes) => {
              const nonEmpty = classes.filter((c) => c.trim().length > 0);
              return nonEmpty.length >= 2;
            },
            { message: t("wizard.step1.errors.stateRequiresTwoClasses") },
          )
          .refine(
            (classes) => {
              const nonEmpty = classes.filter((c) => c.trim().length > 0);
              const unique = new Set(nonEmpty.map((c) => c.toLowerCase()));
              return unique.size === nonEmpty.length;
            },
            { message: t("wizard.step1.errors.classesUnique") },
          ),
      });
    }
  }, [isObjectModel, t]);

  // One form instance typed as the union of both shapes; call sites narrow
  // it with casts depending on the model type.
  const form = useForm<ObjectFormData | StateFormData>({
    resolver: zodResolver(formSchema),
    defaultValues: isObjectModel
      ? ({
          objectLabel: model.object_config?.objects?.[0] || "",
          objectType:
            (model.object_config
              ?.classification_type as ObjectClassificationType) || "sub_label",
        } as ObjectFormData)
      : ({
          classes: [""], // Will be populated from dataset
        } as StateFormData),
    mode: "onChange",
  });

  // Fetch dataset to get current classes for state models
  const { data: dataset } = useSWR<{
    [id: string]: string[];
  }>(isStateModel ? `classification/${model.name}/dataset` : null, {
    revalidateOnFocus: false,
  });

  // Update form with classes from dataset when loaded
  useEffect(() => {
    if (isStateModel && dataset) {
      // The "none" category is internal and not user-editable.
      const classes = Object.keys(dataset).filter((key) => key !== "none");
      if (classes.length > 0) {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          classes,
        );
      }
    }
  }, [dataset, isStateModel, form]);

  // Watched values drive dynamic rendering (class rows, radio highlight).
  const watchedClasses = isStateModel
    ? (form as ReturnType<typeof useForm<StateFormData>>).watch("classes")
    : undefined;
  const watchedObjectType = isObjectModel
    ? (form as ReturnType<typeof useForm<ObjectFormData>>).watch("objectType")
    : undefined;

  // Append a new empty class input row.
  const handleAddClass = useCallback(() => {
    const currentClasses = (
      form as ReturnType<typeof useForm<StateFormData>>
    ).getValues("classes");
    (form as ReturnType<typeof useForm<StateFormData>>).setValue(
      "classes",
      [...currentClasses, ""],
      {
        shouldValidate: true,
      },
    );
  }, [form]);

  // Remove the class input row at `index`, keeping at least one row.
  const handleRemoveClass = useCallback(
    (index: number) => {
      const currentClasses = (
        form as ReturnType<typeof useForm<StateFormData>>
      ).getValues("classes");
      const newClasses = currentClasses.filter((_, i) => i !== index);

      // Ensure at least one field remains (even if empty)
      if (newClasses.length === 0) {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          [""],
          { shouldValidate: true },
        );
      } else {
        (form as ReturnType<typeof useForm<StateFormData>>).setValue(
          "classes",
          newClasses,
          { shouldValidate: true },
        );
      }
    },
    [form],
  );

  // Persist changes. Object models are written via the config API; state
  // models currently only surface an informational toast (see note below).
  const onSubmit = useCallback(
    async (data: ObjectFormData | StateFormData) => {
      setIsSaving(true);

      try {
        if (isObjectModel) {
          const objectData = data as ObjectFormData;

          // Update the config
          await axios.put("/config/set", {
            requires_restart: 0,
            update_topic: `config/classification/custom/${model.name}`,
            config_data: {
              classification: {
                custom: {
                  [model.name]: {
                    enabled: model.enabled,
                    name: model.name,
                    threshold: model.threshold,
                    object_config: {
                      objects: [objectData.objectLabel],
                      classification_type: objectData.objectType,
                    },
                  },
                },
              },
            },
          });

          toast.success(t("toast.success.updatedModel"), {
            position: "top-center",
          });
        } else {
          // State model - update classes
          // Note: For state models, updating classes requires renaming categories
          // which is handled through the dataset API, not the config API
          // We'll need to implement this by calling the rename endpoint for each class
          // For now, we just show a message that this requires retraining
          toast.info(t("edit.stateClassesInfo"), {
            position: "top-center",
          });
        }

        onSuccess();
        onClose();
      } catch (err) {
        const error = err as {
          response?: { data?: { message?: string; detail?: string } };
        };
        const errorMessage =
          error.response?.data?.message ||
          error.response?.data?.detail ||
          "Unknown error";
        toast.error(t("toast.error.updateModelFailed", { errorMessage }), {
          position: "top-center",
        });
      } finally {
        setIsSaving(false);
      }
    },
    [isObjectModel, model, t, onSuccess, onClose],
  );

  // Discard edits and close.
  const handleCancel = useCallback(() => {
    form.reset();
    onClose();
  }, [form, onClose]);

  return (
    <Dialog open={open} onOpenChange={(open) => !open && handleCancel()}>
      <DialogContent>
        <DialogHeader>
          <DialogTitle>{t("edit.title")}</DialogTitle>
          <DialogDescription>
            {isStateModel
              ? t("edit.descriptionState")
              : t("edit.descriptionObject")}
          </DialogDescription>
        </DialogHeader>
        <div className="space-y-6">
          <Form {...form}>
            <form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
              {/* Object model form: label select + classification type radios */}
              {isObjectModel && (
                <>
                  <FormField
                    control={form.control}
                    name="objectLabel"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel className="text-primary-variant">
                          {t("wizard.step1.objectLabel")}
                        </FormLabel>
                        <Select
                          onValueChange={field.onChange}
                          defaultValue={field.value}
                        >
                          <FormControl>
                            <SelectTrigger className="h-8">
                              <SelectValue
                                placeholder={t(
                                  "wizard.step1.objectLabelPlaceholder",
                                )}
                              />
                            </SelectTrigger>
                          </FormControl>
                          <SelectContent>
                            {objectLabels.map((label) => (
                              <SelectItem
                                key={label}
                                value={label}
                                className="cursor-pointer hover:bg-secondary-highlight"
                              >
                                {getTranslatedLabel(label)}
                              </SelectItem>
                            ))}
                          </SelectContent>
                        </Select>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                  <FormField
                    control={form.control}
                    name="objectType"
                    render={({ field }) => (
                      <FormItem>
                        <FormLabel className="text-primary-variant">
                          {t("wizard.step1.classificationType")}
                        </FormLabel>
                        <FormControl>
                          <RadioGroup
                            onValueChange={field.onChange}
                            defaultValue={field.value}
                            className="flex flex-col gap-4 pt-2"
                          >
                            <div className="flex items-center gap-2">
                              <RadioGroupItem
                                className={
                                  watchedObjectType === "sub_label"
                                    ? "bg-selected from-selected/50 to-selected/90 text-selected"
                                    : "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
                                }
                                id="sub_label"
                                value="sub_label"
                              />
                              <Label
                                className="cursor-pointer"
                                htmlFor="sub_label"
                              >
                                {t("wizard.step1.classificationSubLabel")}
                              </Label>
                            </div>
                            <div className="flex items-center gap-2">
                              <RadioGroupItem
                                className={
                                  watchedObjectType === "attribute"
                                    ? "bg-selected from-selected/50 to-selected/90 text-selected"
                                    : "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
                                }
                                id="attribute"
                                value="attribute"
                              />
                              <Label
                                className="cursor-pointer"
                                htmlFor="attribute"
                              >
                                {t("wizard.step1.classificationAttribute")}
                              </Label>
                            </div>
                          </RadioGroup>
                        </FormControl>
                        <FormMessage />
                      </FormItem>
                    )}
                  />
                </>
              )}
              {/* State model form: editable list of class name inputs */}
              {isStateModel && (
                <div className="space-y-2">
                  <div className="flex items-center justify-between">
                    <FormLabel className="text-primary-variant">
                      {t("wizard.step1.states")}
                    </FormLabel>
                    <Button
                      type="button"
                      variant="secondary"
                      className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
                      onClick={handleAddClass}
                    >
                      <LuPlus />
                    </Button>
                  </div>
                  <div className="space-y-2">
                    {watchedClasses?.map((_: string, index: number) => (
                      <FormField
                        key={index}
                        control={
                          (form as ReturnType<typeof useForm<StateFormData>>)
                            .control
                        }
                        name={`classes.${index}` as const}
                        render={({ field }) => (
                          <FormItem>
                            <FormControl>
                              <div className="flex items-center gap-2">
                                <Input
                                  className="text-md h-8"
                                  placeholder={t(
                                    "wizard.step1.classPlaceholder",
                                  )}
                                  {...field}
                                />
                                {/* Only allow row removal when more than one remains */}
                                {watchedClasses &&
                                  watchedClasses.length > 1 && (
                                    <Button
                                      type="button"
                                      variant="ghost"
                                      size="sm"
                                      className="h-8 w-8 p-0"
                                      onClick={() => handleRemoveClass(index)}
                                    >
                                      <LuX className="size-4" />
                                    </Button>
                                  )}
                              </div>
                            </FormControl>
                          </FormItem>
                        )}
                      />
                    ))}
                  </div>
                  {/* Array-level validation errors (min count / uniqueness) */}
                  {isStateModel &&
                    "classes" in form.formState.errors &&
                    form.formState.errors.classes && (
                      <p className="text-sm font-medium text-destructive">
                        {form.formState.errors.classes.message}
                      </p>
                    )}
                </div>
              )}
              <div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
                <Button
                  type="button"
                  onClick={handleCancel}
                  className="sm:flex-1"
                  disabled={isSaving}
                >
                  {t("button.cancel", { ns: "common" })}
                </Button>
                <Button
                  type="submit"
                  variant="select"
                  className="flex items-center justify-center gap-2 sm:flex-1"
                  disabled={!form.formState.isValid || isSaving}
                >
                  {isSaving
                    ? t("button.saving", { ns: "common" })
                    : t("button.save", { ns: "common" })}
                </Button>
              </div>
            </form>
          </Form>
        </div>
      </DialogContent>
    </Dialog>
  );
}

View File

@ -394,7 +394,9 @@ export default function Step1NameAndDefine({
<div className="flex items-center justify-between">
<div className="flex items-center gap-1">
<FormLabel className="text-primary-variant">
{t("wizard.step1.classes")}
{watchedModelType === "state"
? t("wizard.step1.states")
: t("wizard.step1.classes")}
</FormLabel>
<Popover>
<PopoverTrigger asChild>

View File

@ -317,6 +317,21 @@ export default function Step3ChooseExamples({
return unclassifiedImages.length === 0;
}, [unclassifiedImages]);
const handleBack = useCallback(() => {
if (currentClassIndex > 0) {
const previousClass = allClasses[currentClassIndex - 1];
setCurrentClassIndex((prev) => prev - 1);
// Restore selections for the previous class
const previousSelections = Object.entries(imageClassifications)
.filter(([_, className]) => className === previousClass)
.map(([imageName, _]) => imageName);
setSelectedImages(new Set(previousSelections));
} else {
onBack();
}
}, [currentClassIndex, allClasses, imageClassifications, onBack]);
return (
<div className="flex flex-col gap-6">
{isTraining ? (
@ -420,7 +435,7 @@ export default function Step3ChooseExamples({
{!isTraining && (
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
<Button type="button" onClick={onBack} className="sm:flex-1">
<Button type="button" onClick={handleBack} className="sm:flex-1">
{t("button.back", { ns: "common" })}
</Button>
<Button

View File

@ -348,6 +348,26 @@ export function GeneralFilterContent({
onClose,
}: GeneralFilterContentProps) {
const { t } = useTranslation(["components/filter"]);
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const allAudioListenLabels = useMemo<string[]>(() => {
if (!config) {
return [];
}
const labels = new Set<string>();
Object.values(config.cameras).forEach((camera) => {
if (camera?.audio?.enabled) {
camera.audio.listen.forEach((label) => {
labels.add(label);
});
}
});
return [...labels].sort();
}, [config]);
return (
<>
<div className="overflow-x-hidden">
@ -373,7 +393,10 @@ export function GeneralFilterContent({
{allLabels.map((item) => (
<FilterSwitch
key={item}
label={getTranslatedLabel(item)}
label={getTranslatedLabel(
item,
allAudioListenLabels.includes(item) ? "audio" : "object",
)}
isChecked={currentLabels?.includes(item) ?? false}
onCheckedChange={(isChecked) => {
if (isChecked) {

View File

@ -8,7 +8,7 @@ import {
FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { useState, useEffect } from "react";
import { useState, useEffect, useRef } from "react";
import { useFormContext } from "react-hook-form";
import { generateFixedHash, isValidId } from "@/utils/stringUtil";
import { useTranslation } from "react-i18next";
@ -25,6 +25,7 @@ type NameAndIdFieldsProps<T extends FieldValues = FieldValues> = {
processId?: (name: string) => string;
placeholderName?: string;
placeholderId?: string;
idVisible?: boolean;
};
export default function NameAndIdFields<T extends FieldValues = FieldValues>({
@ -39,10 +40,12 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
processId,
placeholderName,
placeholderId,
idVisible,
}: NameAndIdFieldsProps<T>) {
const { t } = useTranslation(["common"]);
const { watch, setValue, trigger } = useFormContext<T>();
const [isIdVisible, setIsIdVisible] = useState(false);
const { watch, setValue, trigger, formState } = useFormContext<T>();
const [isIdVisible, setIsIdVisible] = useState(idVisible ?? false);
const hasUserTypedRef = useRef(false);
const defaultProcessId = (name: string) => {
const normalized = name.replace(/\s+/g, "_").toLowerCase();
@ -58,6 +61,7 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
useEffect(() => {
const subscription = watch((value, { name }) => {
if (name === nameField) {
hasUserTypedRef.current = true;
const processedId = effectiveProcessId(value[nameField] || "");
setValue(idField, processedId as PathValue<T, Path<T>>);
trigger(idField);
@ -66,6 +70,14 @@ export default function NameAndIdFields<T extends FieldValues = FieldValues>({
return () => subscription.unsubscribe();
}, [watch, setValue, trigger, nameField, idField, effectiveProcessId]);
// Auto-expand if there's an error on the ID field after user has typed
useEffect(() => {
const idError = formState.errors[idField];
if (idError && hasUserTypedRef.current && !isIdVisible) {
setIsIdVisible(true);
}
}, [formState.errors, idField, isIdVisible]);
return (
<>
<FormField

View File

@ -258,6 +258,7 @@ export default function CreateTriggerDialog({
nameLabel={t("triggers.dialog.form.name.title")}
nameDescription={t("triggers.dialog.form.name.description")}
placeholderName={t("triggers.dialog.form.name.placeholder")}
idVisible={!!trigger}
/>
<FormField

View File

@ -13,6 +13,9 @@ import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import { Event } from "@/types/event";
// Use a small tolerance (10ms) for browsers with seek precision by-design issues
const TOLERANCE = 0.01;
type ObjectTrackOverlayProps = {
camera: string;
showBoundingBoxes?: boolean;
@ -55,6 +58,47 @@ export default function ObjectTrackOverlay({
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
const {
pathStroke,
pointRadius,
pointStroke,
zoneStroke,
boxStroke,
highlightRadius,
} = useMemo(() => {
const BASE_WIDTH = 1280;
const BASE_HEIGHT = 720;
const BASE_PATH_STROKE = 5;
const BASE_POINT_RADIUS = 7;
const BASE_POINT_STROKE = 3;
const BASE_ZONE_STROKE = 5;
const BASE_BOX_STROKE = 5;
const BASE_HIGHLIGHT_RADIUS = 5;
const scale = Math.sqrt(
(videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
);
const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
const highlightRadius = Math.max(
2,
Math.round(BASE_HIGHLIGHT_RADIUS * scale),
);
return {
pathStroke,
pointRadius,
pointStroke,
zoneStroke,
boxStroke,
highlightRadius,
};
}, [videoWidth, videoHeight]);
// Fetch all event data in a single request (CSV ids)
const { data: eventsData } = useSWR<Event[]>(
selectedObjectIds.length > 0
@ -166,41 +210,50 @@ export default function ObjectTrackOverlay({
}) || [];
// show full path once current time has reached the object's start time
const combinedPoints = [...savedPathPoints, ...eventSequencePoints]
.sort((a, b) => a.timestamp - b.timestamp)
.filter(
(point) =>
currentTime >= (eventData?.start_time ?? 0) &&
point.timestamp >= (eventData?.start_time ?? 0) &&
point.timestamp <= (eventData?.end_time ?? Infinity),
);
// event.start_time is in DETECT stream time, so convert it to record stream time for comparison
const eventStartTimeRecord =
(eventData?.start_time ?? 0) + annotationOffset / 1000;
const allPoints = [...savedPathPoints, ...eventSequencePoints].sort(
(a, b) => a.timestamp - b.timestamp,
);
const combinedPoints = allPoints.filter(
(point) =>
currentTime >= eventStartTimeRecord - TOLERANCE &&
point.timestamp <= effectiveCurrentTime + TOLERANCE,
);
// Get color for this object
const label = eventData?.label || "unknown";
const color = getObjectColor(label, objectId);
// Get current zones
// zones (with tolerance for browsers with seek precision by-design issues)
const currentZones =
timelineData
?.filter(
(event: TrackingDetailsSequence) =>
event.timestamp <= effectiveCurrentTime,
event.timestamp <= effectiveCurrentTime + TOLERANCE,
)
.sort(
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
b.timestamp - a.timestamp,
)[0]?.data?.zones || [];
// Get current bounding box
const currentBox = timelineData
?.filter(
(event: TrackingDetailsSequence) =>
event.timestamp <= effectiveCurrentTime && event.data.box,
)
// bounding box - only show if there's a timeline event at/near the current time with a box
// Search all timeline events (not just those before current time) to find one matching the seek position
const nearbyTimelineEvent = timelineData
?.filter((event: TrackingDetailsSequence) => event.data.box)
.sort(
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
b.timestamp - a.timestamp,
)[0]?.data?.box;
Math.abs(a.timestamp - effectiveCurrentTime) -
Math.abs(b.timestamp - effectiveCurrentTime),
)
.find(
(event: TrackingDetailsSequence) =>
Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
);
const currentBox = nearbyTimelineEvent?.data?.box;
return {
objectId,
@ -221,6 +274,7 @@ export default function ObjectTrackOverlay({
getObjectColor,
config,
camera,
annotationOffset,
]);
// Collect all zones across all objects
@ -274,9 +328,10 @@ export default function ObjectTrackOverlay({
const handlePointClick = useCallback(
(timestamp: number) => {
onSeekToTime?.(timestamp, false);
// Convert detect stream timestamp to record stream timestamp before seeking
onSeekToTime?.(timestamp + annotationOffset / 1000, false);
},
[onSeekToTime],
[onSeekToTime, annotationOffset],
);
const zonePolygons = useMemo(() => {
@ -324,7 +379,7 @@ export default function ObjectTrackOverlay({
points={zone.points}
fill={zone.fill}
stroke={zone.stroke}
strokeWidth="5"
strokeWidth={zoneStroke}
opacity="0.7"
/>
))}
@ -344,7 +399,7 @@ export default function ObjectTrackOverlay({
d={generateStraightPath(absolutePositions)}
fill="none"
stroke={objData.color}
strokeWidth="5"
strokeWidth={pathStroke}
strokeLinecap="round"
strokeLinejoin="round"
/>
@ -356,13 +411,13 @@ export default function ObjectTrackOverlay({
<circle
cx={pos.x}
cy={pos.y}
r="7"
r={pointRadius}
fill={getPointColor(
objData.color,
pos.lifecycle_item?.class_type,
)}
stroke="white"
strokeWidth="3"
strokeWidth={pointStroke}
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
onClick={() => handlePointClick(pos.timestamp)}
/>
@ -391,7 +446,7 @@ export default function ObjectTrackOverlay({
height={objData.currentBox[3] * videoHeight}
fill="none"
stroke={objData.color}
strokeWidth="5"
strokeWidth={boxStroke}
opacity="0.9"
/>
<circle
@ -403,10 +458,10 @@ export default function ObjectTrackOverlay({
(objData.currentBox[1] + objData.currentBox[3]) *
videoHeight
}
r="5"
r={highlightRadius}
fill="rgb(255, 255, 0)" // yellow highlight
stroke={objData.color}
strokeWidth="5"
strokeWidth={boxStroke}
opacity="1"
/>
</g>

View File

@ -91,8 +91,8 @@ export default function AnnotationOffsetSlider({ className }: Props) {
<div className="w-full flex-1 landscape:flex">
<Slider
value={[annotationOffset]}
min={-1500}
max={1500}
min={-2500}
max={2500}
step={50}
onValueChange={handleChange}
/>

View File

@ -1,577 +0,0 @@
import { isDesktop, isIOS, isMobile } from "react-device-detect";
import {
Sheet,
SheetContent,
SheetDescription,
SheetHeader,
SheetTitle,
} from "../../ui/sheet";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import { useFormattedTimestamp } from "@/hooks/use-date-utils";
import { getIconForLabel } from "@/utils/iconUtil";
import { useApiHost } from "@/api";
import {
ReviewDetailPaneType,
ReviewSegment,
ThreatLevel,
} from "@/types/review";
import { Event } from "@/types/event";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { cn } from "@/lib/utils";
import { FrigatePlusDialog } from "../dialog/FrigatePlusDialog";
import TrackingDetails from "./TrackingDetails";
import Chip from "@/components/indicators/Chip";
import { FaDownload, FaImages, FaShareAlt } from "react-icons/fa";
import FrigatePlusIcon from "@/components/icons/FrigatePlusIcon";
import { FaArrowsRotate } from "react-icons/fa6";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { useNavigate } from "react-router-dom";
import { Button } from "@/components/ui/button";
import { baseUrl } from "@/api/baseUrl";
import { shareOrCopy } from "@/utils/browserUtil";
import {
MobilePage,
MobilePageContent,
MobilePageDescription,
MobilePageHeader,
MobilePageTitle,
} from "@/components/mobile/MobilePage";
import { DownloadVideoButton } from "@/components/button/DownloadVideoButton";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { LuSearch } from "react-icons/lu";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { Trans, useTranslation } from "react-i18next";
import { getTranslatedLabel } from "@/utils/i18n";
import { CameraNameLabel } from "@/components/camera/CameraNameLabel";
type ReviewDetailDialogProps = {
  // Review item to show; undefined means the dialog is closed.
  review?: ReviewSegment;
  // Setter used to clear the review (close) after the exit animation.
  setReview: (review: ReviewSegment | undefined) => void;
};

/**
 * Detail dialog for a single review item.
 *
 * Renders as a side Sheet on desktop and a MobilePage on mobile. Shows an
 * "overview" pane (AI analysis, camera, timestamp, detected objects, zones,
 * per-event cards) and can switch to a "details" pane showing tracking
 * details for one selected event. Also hosts the Frigate+ upload dialog.
 */
export default function ReviewDetailDialog({
  review,
  setReview,
}: ReviewDetailDialogProps) {
  const { t } = useTranslation(["views/explore"]);
  const { data: config } = useSWR<FrigateConfig>("config", {
    revalidateOnFocus: false,
  });
  const navigate = useNavigate();

  // upload — event currently queued for Frigate+ submission (undefined = closed)
  const [upload, setUpload] = useState<Event>();

  // data — fetch the full Event objects for every detection id on the review;
  // key is null while no review is open so SWR does not fetch
  const { data: events } = useSWR<Event[]>(
    review ? ["event_ids", { ids: review.data.detections.join(",") }] : null,
  );

  // Optional GenAI analysis metadata attached to the review, if any.
  const aiAnalysis = useMemo(() => review?.data?.metadata, [review]);

  // Human-readable threat summary: translated threat level line followed by
  // any "other concerns", one per line; "None" when nothing applies.
  const aiThreatLevel = useMemo(() => {
    if (
      !aiAnalysis ||
      (!aiAnalysis.potential_threat_level && !aiAnalysis.other_concerns)
    ) {
      return "None";
    }

    let concerns = "";

    switch (aiAnalysis.potential_threat_level) {
      case ThreatLevel.SUSPICIOUS:
        concerns = `${t("suspiciousActivity", { ns: "views/events" })}\n`;
        break;
      case ThreatLevel.DANGER:
        concerns = `${t("threateningActivity", { ns: "views/events" })}\n`;
        break;
    }

    (aiAnalysis.other_concerns ?? []).forEach((c) => {
      concerns += `${c}\n`;
    });

    return concerns || "None";
  }, [aiAnalysis, t]);

  // True when the number of fetched events differs from the number of
  // detection ids on the review (i.e. some events no longer exist).
  const hasMismatch = useMemo(() => {
    if (!review || !events) {
      return false;
    }

    return events.length != review?.data.detections.length;
  }, [review, events]);

  // Distinct labels of fetched events whose id is NOT in review.data.detections.
  // NOTE(review): events are fetched BY those very detection ids above, so this
  // filter looks like it can only ever produce an empty list — possibly the
  // intent was the inverse (detections with no surviving event). Confirm.
  const missingObjects = useMemo(() => {
    if (!review || !events) {
      return [];
    }

    const detectedIds = review.data.detections;
    const missing = Array.from(
      new Set(
        events
          .filter((event) => !detectedIds.includes(event.id))
          .map((event) => event.label),
      ),
    );

    return missing;
  }, [review, events]);

  // Start time formatted per the user's 12h/24h preference and timezone.
  const formattedDate = useFormattedTimestamp(
    review?.start_time ?? 0,
    config?.ui.time_format == "24hour"
      ? t("time.formattedTimestampMonthDayYearHourMinute.24hour", {
          ns: "common",
        })
      : t("time.formattedTimestampMonthDayYearHourMinute.12hour", {
          ns: "common",
        }),
    config?.ui.timezone,
  );

  // content — event selected for the "details" pane, and which pane is shown
  const [selectedEvent, setSelectedEvent] = useState<Event>();
  const [pane, setPane] = useState<ReviewDetailPaneType>("overview");

  // dialog and mobile page — open state mirrors whether a review is set
  const [isOpen, setIsOpen] = useState(review != undefined);

  const handleOpenChange = useCallback(
    (open: boolean) => {
      setIsOpen(open);
      if (!open) {
        // short timeout to allow the mobile page animation
        // to complete before updating the state
        setTimeout(() => {
          setReview(undefined);
          setSelectedEvent(undefined);
          setPane("overview");
        }, 300);
      }
    },
    [setReview, setIsOpen],
  );

  // Re-open (or close) whenever the caller swaps the review prop.
  useEffect(() => {
    setIsOpen(review != undefined);
    // we know that these deps are correct
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [review]);

  // keyboard listener — Esc closes the dialog (ignoring key repeat)
  useKeyboardListener(["Esc"], (key, modifiers) => {
    if (key == "Esc" && modifiers.down && !modifiers.repeat) {
      setIsOpen(false);
    }
    return true;
  });

  // Pick Sheet-based components on desktop, MobilePage-based ones on mobile.
  const Overlay = isDesktop ? Sheet : MobilePage;
  const Content = isDesktop ? SheetContent : MobilePageContent;
  const Header = isDesktop ? SheetHeader : MobilePageHeader;
  const Title = isDesktop ? SheetTitle : MobilePageTitle;
  const Description = isDesktop ? SheetDescription : MobilePageDescription;

  // Nothing to render without a review (renders nothing via undefined).
  if (!review) {
    return;
  }

  return (
    <>
      <Overlay
        open={isOpen ?? false}
        onOpenChange={handleOpenChange}
        enableHistoryBack={true}
      >
        <FrigatePlusDialog
          upload={upload}
          onClose={() => setUpload(undefined)}
          onEventUploaded={() => {
            if (upload) {
              upload.plus_id = "new_upload";
            }
          }}
        />
        <Content
          className={cn(
            "scrollbar-container overflow-y-auto",
            isDesktop && pane == "overview"
              ? "sm:max-w-xl"
              : "pt-2 sm:max-w-4xl",
            isMobile && "px-4",
          )}
        >
          <span tabIndex={0} className="sr-only" />
          {pane == "overview" && (
            <Header className="justify-center">
              <Title>{t("details.item.title")}</Title>
              <Description className="sr-only">
                {t("details.item.desc")}
              </Description>
              <div
                className={cn(
                  "absolute flex gap-2 lg:flex-col",
                  isDesktop && "right-1 top-8",
                  isMobile && "right-0 top-3",
                )}
              >
                <Tooltip>
                  <TooltipTrigger asChild>
                    <Button
                      aria-label={t("details.item.button.share")}
                      size="sm"
                      onClick={() =>
                        shareOrCopy(`${baseUrl}review?id=${review.id}`)
                      }
                    >
                      <FaShareAlt className="size-4 text-secondary-foreground" />
                    </Button>
                  </TooltipTrigger>
                  <TooltipPortal>
                    <TooltipContent>
                      {t("details.item.button.share")}
                    </TooltipContent>
                  </TooltipPortal>
                </Tooltip>
                <Tooltip>
                  <TooltipTrigger>
                    <DownloadVideoButton
                      source={`${baseUrl}api/${review.camera}/start/${review.start_time}/end/${review.end_time || Date.now() / 1000}/clip.mp4`}
                      camera={review.camera}
                      startTime={review.start_time}
                    />
                  </TooltipTrigger>
                  <TooltipPortal>
                    <TooltipContent>
                      {t("button.download", { ns: "common" })}
                    </TooltipContent>
                  </TooltipPortal>
                </Tooltip>
              </div>
            </Header>
          )}
          {pane == "overview" && (
            <div className="flex flex-col gap-5 md:mt-3">
              {aiAnalysis != undefined && (
                <div
                  className={cn(
                    "flex h-full w-full flex-col gap-2 rounded-md bg-card p-2",
                    isDesktop && "m-2 w-[90%]",
                  )}
                >
                  {t("aiAnalysis.title")}
                  <div className="text-sm text-primary/40">
                    {t("details.description.label")}
                  </div>
                  <div className="text-sm">{aiAnalysis.scene}</div>
                  <div className="text-sm text-primary/40">
                    {t("details.score.label")}
                  </div>
                  <div className="text-sm">{aiAnalysis.confidence * 100}%</div>
                  <div className="text-sm text-primary/40">
                    {t("concerns.label")}
                  </div>
                  <div className="text-sm">{aiThreatLevel}</div>
                </div>
              )}
              <div className="flex w-full flex-row">
                <div className="flex w-full flex-col gap-3">
                  <div className="flex flex-col gap-1.5">
                    <div className="text-sm text-primary/40">
                      {t("details.camera")}
                    </div>
                    <div className="text-sm smart-capitalize">
                      <CameraNameLabel camera={review.camera} />
                    </div>
                  </div>
                  <div className="flex flex-col gap-1.5">
                    <div className="text-sm text-primary/40">
                      {t("details.timestamp")}
                    </div>
                    <div className="text-sm">{formattedDate}</div>
                  </div>
                </div>
                <div className="flex w-full flex-col items-center gap-2">
                  <div className="flex w-full flex-col gap-1.5 lg:pr-8">
                    <div className="text-sm text-primary/40">
                      {t("details.objects")}
                    </div>
                    <div className="scrollbar-container flex max-h-32 flex-col items-start gap-2 overflow-y-auto text-sm smart-capitalize">
                      {events?.map((event) => {
                        return (
                          <div
                            key={event.id}
                            className="flex flex-row items-center gap-2 smart-capitalize"
                          >
                            {getIconForLabel(
                              event.label,
                              "size-3 text-primary",
                            )}
                            {event.sub_label ??
                              event.label.replaceAll("_", " ")}{" "}
                            ({Math.round(event.data.top_score * 100)}%)
                            <Tooltip>
                              <TooltipTrigger>
                                <div
                                  className="cursor-pointer"
                                  onClick={() => {
                                    navigate(`/explore?event_id=${event.id}`);
                                  }}
                                >
                                  <LuSearch className="size-4 text-muted-foreground" />
                                </div>
                              </TooltipTrigger>
                              <TooltipPortal>
                                <TooltipContent>
                                  {t("details.item.button.viewInExplore")}
                                </TooltipContent>
                              </TooltipPortal>
                            </Tooltip>
                          </div>
                        );
                      })}
                    </div>
                  </div>
                  {review.data.zones.length > 0 && (
                    <div className="scrollbar-container flex max-h-32 w-full flex-col gap-1.5">
                      <div className="text-sm text-primary/40">
                        {t("details.zones")}
                      </div>
                      <div className="flex flex-col items-start gap-2 text-sm smart-capitalize">
                        {review.data.zones.map((zone) => {
                          return (
                            <div
                              key={zone}
                              className="flex flex-row items-center gap-2 smart-capitalize"
                            >
                              {zone.replaceAll("_", " ")}
                            </div>
                          );
                        })}
                      </div>
                    </div>
                  )}
                </div>
              </div>
              {hasMismatch && (
                <div className="p-4 text-center text-sm">
                  {(() => {
                    const detectedCount = Math.abs(
                      (events?.length ?? 0) -
                        (review?.data.detections.length ?? 0),
                    );
                    return t("details.item.tips.mismatch", {
                      count: detectedCount,
                    });
                  })()}
                  {missingObjects.length > 0 && (
                    <div className="mt-2">
                      <Trans
                        ns="views/explore"
                        values={{
                          objects: missingObjects
                            .map((x) => getTranslatedLabel(x))
                            .join(", "),
                        }}
                      >
                        details.item.tips.hasMissingObjects
                      </Trans>
                    </div>
                  )}
                </div>
              )}
              <div className="relative flex size-full flex-col gap-2">
                {events?.map((event) => (
                  <EventItem
                    key={event.id}
                    event={event}
                    setPane={setPane}
                    setSelectedEvent={setSelectedEvent}
                    setUpload={setUpload}
                  />
                ))}
              </div>
            </div>
          )}
          {pane == "details" && selectedEvent && (
            <div className="mt-0 flex size-full flex-col gap-2">
              <TrackingDetails event={selectedEvent} setPane={setPane} />
            </div>
          )}
        </Content>
      </Overlay>
    </>
  );
}
type EventItemProps = {
  // Event whose snapshot/thumbnail and actions are rendered.
  event: Event;
  // Switches the parent dialog to the "details" pane.
  setPane: React.Dispatch<React.SetStateAction<ReviewDetailPaneType>>;
  // Marks this event as the one shown in the "details" pane.
  setSelectedEvent: React.Dispatch<React.SetStateAction<Event | undefined>>;
  // Optional: queues this event for Frigate+ submission.
  setUpload?: React.Dispatch<React.SetStateAction<Event | undefined>>;
};

/**
 * Card for one event inside the review detail dialog: shows the event's
 * snapshot (or thumbnail when no snapshot exists) with hover-revealed action
 * chips — download, submit to Frigate+, view tracking details, find similar.
 * On mobile the actions are always shown (hovered starts true).
 */
function EventItem({
  event,
  setPane,
  setSelectedEvent,
  setUpload,
}: EventItemProps) {
  const { t } = useTranslation(["views/explore"]);
  const { data: config } = useSWR<FrigateConfig>("config", {
    revalidateOnFocus: false,
  });

  const apiHost = useApiHost();

  const imgRef = useRef(null);

  // On mobile there is no hover, so actions start visible.
  const [hovered, setHovered] = useState(isMobile);
  const navigate = useNavigate();

  return (
    <>
      <div
        className={cn(
          "relative mr-auto",
          !event.has_snapshot && "flex flex-row items-center justify-center",
        )}
        onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
        onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
        key={event.id}
      >
        {event.has_snapshot && (
          <>
            <div className="pointer-events-none absolute inset-x-0 top-0 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div>
            <div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div>
          </>
        )}
        <img
          ref={imgRef}
          className={cn(
            "select-none rounded-lg object-contain transition-opacity",
          )}
          style={
            isIOS
              ? {
                  WebkitUserSelect: "none",
                  WebkitTouchCallout: "none",
                }
              : undefined
          }
          draggable={false}
          src={
            event.has_snapshot
              ? `${apiHost}api/events/${event.id}/snapshot.jpg`
              : `${apiHost}api/events/${event.id}/thumbnail.webp`
          }
        />
        {hovered && (
          <div>
            <div
              className={cn("absolute right-1 top-1 flex items-center gap-2")}
            >
              <Tooltip>
                <TooltipTrigger asChild>
                  <a
                    download
                    href={
                      event.has_snapshot
                        ? `${apiHost}api/events/${event.id}/snapshot.jpg`
                        : `${apiHost}api/events/${event.id}/thumbnail.webp`
                    }
                  >
                    <Chip className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500">
                      <FaDownload className="size-4 text-white" />
                    </Chip>
                  </a>
                </TooltipTrigger>
                <TooltipContent>
                  {t("button.download", { ns: "common" })}
                </TooltipContent>
              </Tooltip>
              {event.has_snapshot &&
                event.plus_id == undefined &&
                event.data.type == "object" &&
                config?.plus.enabled && (
                  <Tooltip>
                    <TooltipTrigger>
                      <Chip
                        className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
                        onClick={() => {
                          setUpload?.(event);
                        }}
                      >
                        <FrigatePlusIcon className="size-4 text-white" />
                      </Chip>
                    </TooltipTrigger>
                    <TooltipContent>
                      {t("itemMenu.submitToPlus.label")}
                    </TooltipContent>
                  </Tooltip>
                )}
              {event.has_clip && (
                <Tooltip>
                  <TooltipTrigger>
                    <Chip
                      className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
                      onClick={() => {
                        setPane("details");
                        setSelectedEvent(event);
                      }}
                    >
                      <FaArrowsRotate className="size-4 text-white" />
                    </Chip>
                  </TooltipTrigger>
                  <TooltipContent>
                    {t("itemMenu.viewTrackingDetails.label")}
                  </TooltipContent>
                </Tooltip>
              )}
              {event.has_snapshot && config?.semantic_search.enabled && (
                <Tooltip>
                  <TooltipTrigger>
                    <Chip
                      className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
                      onClick={() => {
                        navigate(
                          `/explore?search_type=similarity&event_id=${event.id}`,
                        );
                      }}
                    >
                      <FaImages className="size-4 text-white" />
                    </Chip>
                  </TooltipTrigger>
                  <TooltipContent>
                    {t("itemMenu.findSimilar.label")}
                  </TooltipContent>
                </Tooltip>
              )}
            </div>
          </div>
        )}
      </div>
    </>
  );
}

View File

@ -31,10 +31,9 @@ import {
FaDownload,
FaHistory,
FaImage,
FaRegListAlt,
FaVideo,
} from "react-icons/fa";
import TrackingDetails from "./TrackingDetails";
import { TrackingDetails } from "./TrackingDetails";
import { DetailStreamProvider } from "@/context/detail-stream-context";
import {
MobilePage,
MobilePageContent,
@ -80,13 +79,9 @@ import { getTranslatedLabel } from "@/utils/i18n";
import { CgTranscript } from "react-icons/cg";
import { CameraNameLabel } from "@/components/camera/CameraNameLabel";
import { PiPath } from "react-icons/pi";
import Heading from "@/components/ui/heading";
const SEARCH_TABS = [
"details",
"snapshot",
"video",
"tracking_details",
] as const;
const SEARCH_TABS = ["snapshot", "tracking_details"] as const;
export type SearchTab = (typeof SEARCH_TABS)[number];
type SearchDetailDialogProps = {
@ -109,6 +104,7 @@ export default function SearchDetailDialog({
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const apiHost = useApiHost();
// tabs
@ -149,16 +145,6 @@ export default function SearchDetailDialog({
const views = [...SEARCH_TABS];
if (!search.has_snapshot) {
const index = views.indexOf("snapshot");
views.splice(index, 1);
}
if (!search.has_clip) {
const index = views.indexOf("video");
views.splice(index, 1);
}
if (search.data.type != "object" || !search.has_clip) {
const index = views.indexOf("tracking_details");
views.splice(index, 1);
@ -173,10 +159,50 @@ export default function SearchDetailDialog({
}
if (!searchTabs.includes(pageToggle)) {
setSearchPage("details");
setSearchPage("snapshot");
}
}, [pageToggle, searchTabs, setSearchPage]);
// Tabs component for reuse
const tabsComponent = (
<ScrollArea className="w-full whitespace-nowrap">
<div className="flex flex-row">
<ToggleGroup
className="*:rounded-md *:px-3 *:py-4"
type="single"
size="sm"
value={pageToggle}
onValueChange={(value: SearchTab) => {
if (value) {
setPageToggle(value);
}
}}
>
{Object.values(searchTabs).map((item) => (
<ToggleGroupItem
key={item}
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
value={item}
data-nav-item={item}
aria-label={`Select ${item}`}
>
{item == "snapshot" && <FaImage className="size-4" />}
{item == "tracking_details" && <PiPath className="size-4" />}
<div className="smart-capitalize">
{item === "snapshot"
? search?.has_snapshot
? t("type.snapshot")
: t("type.thumbnail")
: t(`type.${item}`)}
</div>
</ToggleGroupItem>
))}
</ToggleGroup>
<ScrollBar orientation="horizontal" className="h-0" />
</div>
</ScrollArea>
);
if (!search) {
return;
}
@ -190,92 +216,188 @@ export default function SearchDetailDialog({
const Description = isDesktop ? DialogDescription : MobilePageDescription;
return (
<Overlay
open={isOpen}
onOpenChange={handleOpenChange}
enableHistoryBack={true}
<DetailStreamProvider
isDetailMode={true}
currentTime={(search as unknown as Event)?.start_time ?? 0}
camera={(search as unknown as Event)?.camera ?? ""}
initialSelectedObjectIds={[(search as unknown as Event).id as string]}
>
<Content
className={cn(
"scrollbar-container overflow-y-auto",
isDesktop &&
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
isMobile && "px-4",
)}
<Overlay
open={isOpen}
onOpenChange={handleOpenChange}
enableHistoryBack={true}
>
<Header>
<Title>{t("trackedObjectDetails")}</Title>
<Description className="sr-only">
{t("trackedObjectDetails")}
</Description>
</Header>
<ScrollArea
className={cn("w-full whitespace-nowrap", isMobile && "my-2")}
<Content
className={cn(
"scrollbar-container overflow-y-auto",
isDesktop &&
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
isDesktop &&
page == "tracking_details" &&
"lg:max-w-[75%] xl:max-w-[80%]",
isMobile && "px-4",
)}
>
<div className="flex flex-row">
<ToggleGroup
className="*:rounded-md *:px-3 *:py-4"
type="single"
size="sm"
value={pageToggle}
onValueChange={(value: SearchTab) => {
if (value) {
setPageToggle(value);
}
}}
>
{Object.values(searchTabs).map((item) => (
<ToggleGroupItem
key={item}
className={`flex scroll-mx-10 items-center justify-between gap-2 ${page == "details" ? "last:mr-20" : ""} ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
value={item}
data-nav-item={item}
aria-label={`Select ${item}`}
<Header>
<Title>{t("trackedObjectDetails")}</Title>
<Description className="sr-only">
{t("trackedObjectDetails")}
</Description>
</Header>
{isDesktop ? (
page === "tracking_details" ? (
<TrackingDetails
className="size-full"
event={search as unknown as Event}
tabs={tabsComponent}
/>
) : (
<div className="flex h-full gap-4 overflow-hidden">
<div
className={cn(
"scrollbar-container flex-[3] overflow-y-hidden",
page === "snapshot" && !search.has_snapshot && "flex-[2]",
)}
>
{item == "details" && <FaRegListAlt className="size-4" />}
{item == "snapshot" && <FaImage className="size-4" />}
{item == "video" && <FaVideo className="size-4" />}
{item == "tracking_details" && <PiPath className="size-4" />}
<div className="smart-capitalize">{t(`type.${item}`)}</div>
</ToggleGroupItem>
))}
</ToggleGroup>
<ScrollBar orientation="horizontal" className="h-0" />
</div>
</ScrollArea>
{page == "details" && (
<ObjectDetailsTab
search={search}
config={config}
setSearch={setSearch}
setSimilarity={setSimilarity}
setInputFocused={setInputFocused}
/>
)}
{page == "snapshot" && (
<ObjectSnapshotTab
search={
{
...search,
plus_id: config?.plus?.enabled ? search.plus_id : "not_enabled",
} as unknown as Event
}
onEventUploaded={() => {
search.plus_id = "new_upload";
}}
/>
)}
{page == "video" && <VideoTab search={search} />}
{page == "tracking_details" && (
<TrackingDetails
className="w-full overflow-x-hidden"
event={search as unknown as Event}
fullscreen={true}
setPane={() => {}}
/>
)}
</Content>
</Overlay>
{page === "snapshot" && search.has_snapshot && (
<ObjectSnapshotTab
search={
{
...search,
plus_id: config?.plus?.enabled
? search.plus_id
: "not_enabled",
} as unknown as Event
}
onEventUploaded={() => {
search.plus_id = "new_upload";
}}
/>
)}
{page === "snapshot" && !search.has_snapshot && (
<img
className="size-full select-none rounded-lg object-contain transition-opacity"
style={
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
/>
)}
</div>
<div className="flex flex-[2] flex-col gap-4 overflow-hidden">
{tabsComponent}
<div className="scrollbar-container flex-1 overflow-y-auto">
{page == "snapshot" && (
<ObjectDetailsTab
search={search}
config={config}
setSearch={setSearch}
setSimilarity={setSimilarity}
setInputFocused={setInputFocused}
showThumbnail={false}
/>
)}
</div>
</div>
</div>
)
) : (
<>
<ScrollArea
className={cn("w-full whitespace-nowrap", isMobile && "my-2")}
>
<div className="flex flex-row">
<ToggleGroup
className="*:rounded-md *:px-3 *:py-4"
type="single"
size="sm"
value={pageToggle}
onValueChange={(value: SearchTab) => {
if (value) {
setPageToggle(value);
}
}}
>
{Object.values(searchTabs).map((item) => (
<ToggleGroupItem
key={item}
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
value={item}
data-nav-item={item}
aria-label={`Select ${item}`}
>
{item == "snapshot" && <FaImage className="size-4" />}
{item == "tracking_details" && (
<PiPath className="size-4" />
)}
<div className="smart-capitalize">
{t(`type.${item}`)}
</div>
</ToggleGroupItem>
))}
</ToggleGroup>
<ScrollBar orientation="horizontal" className="h-0" />
</div>
</ScrollArea>
{page == "snapshot" && (
<>
{search.has_snapshot && (
<ObjectSnapshotTab
search={
{
...search,
plus_id: config?.plus?.enabled
? search.plus_id
: "not_enabled",
} as unknown as Event
}
onEventUploaded={() => {
search.plus_id = "new_upload";
}}
/>
)}
{page == "snapshot" && !search.has_snapshot && (
<img
className="w-full select-none rounded-lg object-contain transition-opacity"
style={
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
/>
)}
<Heading as="h3" className="mt-2 smart-capitalize">
{t("type.details")}
</Heading>
<ObjectDetailsTab
search={search}
config={config}
setSearch={setSearch}
setSimilarity={setSimilarity}
setInputFocused={setInputFocused}
showThumbnail={false}
/>
</>
)}
{page == "tracking_details" && (
<TrackingDetails event={search as unknown as Event} />
)}
</>
)}
</Content>
</Overlay>
</DetailStreamProvider>
);
}
@ -285,6 +407,7 @@ type ObjectDetailsTabProps = {
setSearch: (search: SearchResult | undefined) => void;
setSimilarity?: () => void;
setInputFocused: React.Dispatch<React.SetStateAction<boolean>>;
showThumbnail?: boolean;
};
function ObjectDetailsTab({
search,
@ -292,6 +415,7 @@ function ObjectDetailsTab({
setSearch,
setSimilarity,
setInputFocused,
showThumbnail = true,
}: ObjectDetailsTabProps) {
const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
@ -873,66 +997,71 @@ function ObjectDetailsTab({
<div className="text-sm">{formattedDate}</div>
</div>
</div>
<div className="flex w-full flex-col gap-2 pl-6">
<img
className="aspect-video select-none rounded-lg object-contain transition-opacity"
style={
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
/>
<div
className={cn("flex w-full flex-row gap-2", isMobile && "flex-col")}
>
{config?.semantic_search.enabled &&
setSimilarity != undefined &&
search.data.type == "object" && (
<Button
{showThumbnail && (
<div className="flex w-full flex-col gap-2 pl-6">
<img
className="aspect-video select-none rounded-lg object-contain transition-opacity"
style={
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
/>
<div
className={cn(
"flex w-full flex-row gap-2",
isMobile && "flex-col",
)}
>
{config?.semantic_search.enabled &&
setSimilarity != undefined &&
search.data.type == "object" && (
<Button
className="w-full"
aria-label={t("itemMenu.findSimilar.aria")}
onClick={() => {
setSearch(undefined);
setSimilarity();
}}
>
<div className="flex gap-1">
<LuSearch />
{t("itemMenu.findSimilar.label")}
</div>
</Button>
)}
{hasFace && (
<FaceSelectionDialog
className="w-full"
aria-label={t("itemMenu.findSimilar.aria")}
onClick={() => {
setSearch(undefined);
setSimilarity();
}}
faceNames={faceNames}
onTrainAttempt={onTrainFace}
>
<div className="flex gap-1">
<LuSearch />
{t("itemMenu.findSimilar.label")}
</div>
</Button>
)}
{hasFace && (
<FaceSelectionDialog
className="w-full"
faceNames={faceNames}
onTrainAttempt={onTrainFace}
>
<Button className="w-full">
<div className="flex gap-1">
<TbFaceId />
{t("trainFace", { ns: "views/faceLibrary" })}
</div>
</Button>
</FaceSelectionDialog>
)}
{config?.cameras[search?.camera].audio_transcription.enabled &&
search?.label == "speech" &&
search?.end_time && (
<Button className="w-full" onClick={onTranscribe}>
<div className="flex gap-1">
<CgTranscript />
{t("itemMenu.audioTranscription.label")}
</div>
</Button>
<Button className="w-full">
<div className="flex gap-1">
<TbFaceId />
{t("trainFace", { ns: "views/faceLibrary" })}
</div>
</Button>
</FaceSelectionDialog>
)}
{config?.cameras[search?.camera].audio_transcription.enabled &&
search?.label == "speech" &&
search?.end_time && (
<Button className="w-full" onClick={onTranscribe}>
<div className="flex gap-1">
<CgTranscript />
{t("itemMenu.audioTranscription.label")}
</div>
</Button>
)}
</div>
</div>
</div>
)}
</div>
<div className="flex flex-col gap-1.5">
{config?.cameras[search.camera].objects.genai.enabled &&
@ -1167,7 +1296,7 @@ export function ObjectSnapshotTab({
search.label != "on_demand" && (
<Card className="p-1 text-sm md:p-2">
<CardContent className="flex flex-col items-center justify-between gap-3 p-2 md:flex-row">
<div className={cn("flex flex-col space-y-3")}>
<div className={cn("flex max-w-sm flex-col space-y-3")}>
<div className={"text-lg leading-none"}>
{t("explore.plus.submitToPlus.label")}
</div>
@ -1176,7 +1305,7 @@ export function ObjectSnapshotTab({
</div>
</div>
<div className="flex w-full flex-1 flex-col justify-center gap-2 md:ml-8 md:w-auto md:justify-end">
<div className="flex w-full flex-1 flex-col justify-center gap-2 md:ml-8 md:flex-1 md:justify-end">
{state == "reviewing" && (
<>
<div>

File diff suppressed because it is too large Load Diff

View File

@ -289,6 +289,7 @@ export default function VideoControls({
}}
onUploadFrame={onUploadFrame}
containerRef={containerRef}
fullscreen={fullscreen}
/>
)}
{features.fullscreen && toggleFullscreen && (
@ -306,6 +307,7 @@ type FrigatePlusUploadButtonProps = {
onClose: () => void;
onUploadFrame: () => void;
containerRef?: React.MutableRefObject<HTMLDivElement | null>;
fullscreen?: boolean;
};
function FrigatePlusUploadButton({
video,
@ -313,6 +315,7 @@ function FrigatePlusUploadButton({
onClose,
onUploadFrame,
containerRef,
fullscreen,
}: FrigatePlusUploadButtonProps) {
const { t } = useTranslation(["components/player"]);
@ -349,7 +352,11 @@ function FrigatePlusUploadButton({
/>
</AlertDialogTrigger>
<AlertDialogContent
portalProps={{ container: containerRef?.current }}
portalProps={
fullscreen && containerRef?.current
? { container: containerRef.current }
: undefined
}
className="md:max-w-2xl lg:max-w-3xl xl:max-w-4xl"
>
<AlertDialogHeader>

View File

@ -174,9 +174,7 @@ export default function CameraWizardDialog({
...(friendlyName && { friendly_name: friendlyName }),
ffmpeg: {
inputs: wizardData.streams.map((stream, index) => {
const isRestreamed =
wizardData.restreamIds?.includes(stream.id) ?? false;
if (isRestreamed) {
if (stream.restream) {
const go2rtcStreamName =
wizardData.streams!.length === 1
? finalCameraName
@ -234,7 +232,11 @@ export default function CameraWizardDialog({
wizardData.streams!.length === 1
? finalCameraName
: `${finalCameraName}_${index + 1}`;
go2rtcStreams[streamName] = [stream.url];
const streamUrl = stream.useFfmpeg
? `ffmpeg:${stream.url}`
: stream.url;
go2rtcStreams[streamName] = [streamUrl];
});
if (Object.keys(go2rtcStreams).length > 0) {

View File

@ -385,7 +385,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder={t(
"cameraWizard.step1.cameraNamePlaceholder",
)}
@ -475,7 +475,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder="192.168.1.100"
{...field}
/>
@ -495,7 +495,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder={t(
"cameraWizard.step1.usernamePlaceholder",
)}
@ -518,7 +518,7 @@ export default function Step1NameCamera({
<FormControl>
<div className="relative">
<Input
className="h-8 pr-10"
className="text-md h-8 pr-10"
type={showPassword ? "text" : "password"}
placeholder={t(
"cameraWizard.step1.passwordPlaceholder",
@ -558,7 +558,7 @@ export default function Step1NameCamera({
</FormLabel>
<FormControl>
<Input
className="h-8"
className="text-md h-8"
placeholder="rtsp://username:password@host:port/path"
{...field}
/>
@ -608,6 +608,12 @@ export default function Step1NameCamera({
</div>
)}
{isTesting && (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<ActivityIndicator className="size-4" />
{testStatus}
</div>
)}
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
<Button
type="button"
@ -635,10 +641,7 @@ export default function Step1NameCamera({
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
>
{isTesting && <ActivityIndicator className="size-4" />}
{isTesting && testStatus
? testStatus
: t("cameraWizard.step1.testConnection")}
{t("cameraWizard.step1.testConnection")}
</Button>
)}
</div>

View File

@ -201,16 +201,12 @@ export default function Step2StreamConfig({
const setRestream = useCallback(
(streamId: string) => {
const currentIds = wizardData.restreamIds || [];
const isSelected = currentIds.includes(streamId);
const newIds = isSelected
? currentIds.filter((id) => id !== streamId)
: [...currentIds, streamId];
onUpdate({
restreamIds: newIds,
});
const stream = streams.find((s) => s.id === streamId);
if (!stream) return;
updateStream(streamId, { restream: !stream.restream });
},
[wizardData.restreamIds, onUpdate],
[streams, updateStream],
);
const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
@ -435,9 +431,7 @@ export default function Step2StreamConfig({
{t("cameraWizard.step2.go2rtc")}
</span>
<Switch
checked={(wizardData.restreamIds || []).includes(
stream.id,
)}
checked={stream.restream || false}
onCheckedChange={() => setRestream(stream.id)}
/>
</div>

View File

@ -1,7 +1,13 @@
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { Switch } from "@/components/ui/switch";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { useTranslation } from "react-i18next";
import { LuRotateCcw } from "react-icons/lu";
import { LuRotateCcw, LuInfo } from "react-icons/lu";
import { useState, useCallback, useMemo, useEffect } from "react";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import axios from "axios";
@ -216,7 +222,6 @@ export default function Step3Validation({
brandTemplate: wizardData.brandTemplate,
customUrl: wizardData.customUrl,
streams: wizardData.streams,
restreamIds: wizardData.restreamIds,
};
onSave(configData);
@ -322,6 +327,51 @@ export default function Step3Validation({
</div>
)}
{result?.success && (
<div className="mb-3 flex items-center justify-between">
<div className="flex items-center gap-2">
<span className="text-sm">
{t("cameraWizard.step3.ffmpegModule")}
</span>
<Popover>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="sm"
className="h-4 w-4 p-0"
>
<LuInfo className="size-3" />
</Button>
</PopoverTrigger>
<PopoverContent className="pointer-events-auto w-80 text-xs">
<div className="space-y-2">
<div className="font-medium">
{t("cameraWizard.step3.ffmpegModule")}
</div>
<div className="text-muted-foreground">
{t(
"cameraWizard.step3.ffmpegModuleDescription",
)}
</div>
</div>
</PopoverContent>
</Popover>
</div>
<Switch
checked={stream.useFfmpeg || false}
onCheckedChange={(checked) => {
onUpdate({
streams: streams.map((s) =>
s.id === stream.id
? { ...s, useFfmpeg: checked }
: s,
),
});
}}
/>
</div>
)}
<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
<span className="break-all text-sm text-muted-foreground">
{stream.url}
@ -491,8 +541,7 @@ function StreamIssues({
// Restreaming check
if (stream.roles.includes("record")) {
const restreamIds = wizardData.restreamIds || [];
if (restreamIds.includes(stream.id)) {
if (stream.restream) {
result.push({
type: "warning",
message: t("cameraWizard.step3.issues.restreamingWarning"),
@ -660,9 +709,10 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
useEffect(() => {
// Register stream with go2rtc
const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
axios
.put(`go2rtc/streams/${streamId}`, null, {
params: { src: stream.url },
params: { src: streamUrl },
})
.then(() => {
// Add small delay to allow go2rtc api to run and initialize the stream
@ -680,7 +730,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
// do nothing on cleanup errors - go2rtc won't consume the streams
});
};
}, [stream.url, streamId]);
}, [stream.url, stream.useFfmpeg, streamId]);
const resolution = stream.testResult?.resolution;
let aspectRatio = "16/9";

View File

@ -22,6 +22,7 @@ import {
LuChevronRight,
LuSettings,
} from "react-icons/lu";
import { MdAutoAwesome } from "react-icons/md";
import { getTranslatedLabel } from "@/utils/i18n";
import EventMenu from "@/components/timeline/EventMenu";
import { FrigatePlusDialog } from "@/components/overlay/dialog/FrigatePlusDialog";
@ -57,7 +58,7 @@ export default function DetailStream({
elementRef: scrollRef,
});
const effectiveTime = currentTime + annotationOffset / 1000;
const effectiveTime = currentTime - annotationOffset / 1000;
const [upload, setUpload] = useState<Event | undefined>(undefined);
const [controlsExpanded, setControlsExpanded] = useState(false);
const [alwaysExpandActive, setAlwaysExpandActive] = usePersistence(
@ -213,6 +214,7 @@ export default function DetailStream({
config={config}
onSeek={onSeekCheckPlaying}
effectiveTime={effectiveTime}
annotationOffset={annotationOffset}
isActive={activeReviewId == id}
onActivate={() => setActiveReviewId(id)}
onOpenUpload={(e) => setUpload(e)}
@ -278,6 +280,7 @@ type ReviewGroupProps = {
onActivate?: () => void;
onOpenUpload?: (e: Event) => void;
effectiveTime?: number;
annotationOffset: number;
alwaysExpandActive?: boolean;
};
@ -290,11 +293,14 @@ function ReviewGroup({
onActivate,
onOpenUpload,
effectiveTime,
annotationOffset,
alwaysExpandActive = false,
}: ReviewGroupProps) {
const { t } = useTranslation("views/events");
const [open, setOpen] = useState(false);
const start = review.start_time ?? 0;
// review.start_time is in detect time, convert to record for seeking
const startRecord = start + annotationOffset / 1000;
// Auto-expand when this review becomes active and alwaysExpandActive is enabled
useEffect(() => {
@ -362,7 +368,11 @@ function ReviewGroup({
return (
<div
data-review-id={id}
className="cursor-pointer rounded-lg bg-secondary py-3"
className={`mx-1 cursor-pointer rounded-lg bg-secondary px-0 py-3 outline outline-[2px] -outline-offset-[1.8px] ${
isActive
? "shadow-selected outline-selected"
: "outline-transparent duration-500"
}`}
>
<div
className={cn(
@ -371,16 +381,16 @@ function ReviewGroup({
)}
onClick={() => {
onActivate?.();
onSeek(start);
onSeek(startRecord);
}}
>
<div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
<LuCircle
className={cn(
"size-3",
isActive
? "fill-selected text-selected"
: "fill-muted duration-500 dark:fill-secondary-highlight dark:text-secondary-highlight",
"size-3 duration-500",
review.severity == "alert"
? "fill-severity_alert text-severity_alert"
: "fill-severity_detection text-severity_detection",
)}
/>
</div>
@ -401,8 +411,9 @@ function ReviewGroup({
</div>
<div className="flex flex-col gap-0.5">
{review.data.metadata?.title && (
<div className="mb-1 text-sm text-primary-variant">
{review.data.metadata.title}
<div className="mb-1 flex items-center gap-1 text-sm text-primary-variant">
<MdAutoAwesome className="size-3 shrink-0" />
<span className="truncate">{review.data.metadata.title}</span>
</div>
)}
<div className="flex flex-row items-center gap-1.5">
@ -449,7 +460,9 @@ function ReviewGroup({
<EventList
key={event.id}
event={event}
review={review}
effectiveTime={effectiveTime}
annotationOffset={annotationOffset}
onSeek={onSeek}
onOpenUpload={onOpenUpload}
/>
@ -482,13 +495,17 @@ function ReviewGroup({
type EventListProps = {
event: Event;
review: ReviewSegment;
effectiveTime?: number;
annotationOffset: number;
onSeek: (ts: number, play?: boolean) => void;
onOpenUpload?: (e: Event) => void;
};
function EventList({
event,
review,
effectiveTime,
annotationOffset,
onSeek,
onOpenUpload,
}: EventListProps) {
@ -505,14 +522,17 @@ function EventList({
if (event) {
setSelectedObjectIds([]);
setSelectedObjectIds([event.id]);
onSeek(event.start_time);
// event.start_time is detect time, convert to record
const recordTime = event.start_time + annotationOffset / 1000;
onSeek(recordTime);
} else {
setSelectedObjectIds([]);
}
};
const handleTimelineClick = (ts: number, play?: boolean) => {
handleObjectSelect(event);
setSelectedObjectIds([]);
setSelectedObjectIds([event.id]);
onSeek(ts, play);
};
@ -554,7 +574,6 @@ function EventList({
)}
onClick={(e) => {
e.stopPropagation();
onSeek(event.start_time);
handleObjectSelect(event);
}}
role="button"
@ -568,7 +587,6 @@ function EventList({
className="flex flex-1 items-center gap-2"
onClick={(e) => {
e.stopPropagation();
onSeek(event.start_time);
handleObjectSelect(event);
}}
role="button"
@ -604,9 +622,11 @@ function EventList({
<div className="mt-2">
<ObjectTimeline
review={review}
eventId={event.id}
onSeek={handleTimelineClick}
effectiveTime={effectiveTime}
annotationOffset={annotationOffset}
startTime={event.start_time}
endTime={event.end_time}
/>
@ -621,6 +641,7 @@ type LifecycleItemProps = {
isActive?: boolean;
onSeek?: (timestamp: number, play?: boolean) => void;
effectiveTime?: number;
annotationOffset: number;
isTimelineActive?: boolean;
};
@ -629,6 +650,7 @@ function LifecycleItem({
isActive,
onSeek,
effectiveTime,
annotationOffset,
isTimelineActive = false,
}: LifecycleItemProps) {
const { t } = useTranslation("views/events");
@ -682,7 +704,8 @@ function LifecycleItem({
<div
role="button"
onClick={() => {
onSeek?.(item.timestamp, false);
const recordTimestamp = item.timestamp + annotationOffset / 1000;
onSeek?.(recordTimestamp, false);
}}
className={cn(
"flex cursor-pointer items-center gap-2 text-sm text-primary-variant",
@ -748,26 +771,44 @@ function LifecycleItem({
// Fetch and render timeline entries for a single event id on demand.
function ObjectTimeline({
review,
eventId,
onSeek,
effectiveTime,
annotationOffset,
startTime,
endTime,
}: {
review: ReviewSegment;
eventId: string;
onSeek: (ts: number, play?: boolean) => void;
effectiveTime?: number;
annotationOffset: number;
startTime?: number;
endTime?: number;
}) {
const { t } = useTranslation("views/events");
const { data: timeline, isValidating } = useSWR<TrackingDetailsSequence[]>([
const { data: fullTimeline, isValidating } = useSWR<
TrackingDetailsSequence[]
>([
"timeline",
{
source_id: eventId,
},
]);
const timeline = useMemo(() => {
if (!fullTimeline) {
return fullTimeline;
}
return fullTimeline.filter(
(t) =>
t.timestamp >= review.start_time &&
(review.end_time == undefined || t.timestamp <= review.end_time),
);
}, [fullTimeline, review]);
if (isValidating && (!timeline || timeline.length === 0)) {
return <ActivityIndicator className="ml-2 size-3" />;
}
@ -857,6 +898,7 @@ function ObjectTimeline({
onSeek={onSeek}
isActive={isActive}
effectiveTime={effectiveTime}
annotationOffset={annotationOffset}
isTimelineActive={isWithinEventRange}
/>
);

View File

@ -1,4 +1,3 @@
import { useApiHost } from "@/api";
import { useTimelineUtils } from "@/hooks/use-timeline-utils";
import { useEventSegmentUtils } from "@/hooks/use-event-segment-utils";
import { ReviewSegment, ReviewSeverity } from "@/types/review";
@ -18,6 +17,7 @@ import { HoverCardPortal } from "@radix-ui/react-hover-card";
import scrollIntoView from "scroll-into-view-if-needed";
import { MinimapBounds, Tick, Timestamp } from "./segment-metadata";
import useTapUtils from "@/hooks/use-tap-utils";
import ReviewCard from "../card/ReviewCard";
type EventSegmentProps = {
events: ReviewSegment[];
@ -54,7 +54,7 @@ export function EventSegment({
displaySeverityType,
shouldShowRoundedCorners,
getEventStart,
getEventThumbnail,
getEvent,
} = useEventSegmentUtils(segmentDuration, events, severityType);
const { alignStartDateToTimeline, alignEndDateToTimeline } = useTimelineUtils(
@ -87,13 +87,11 @@ export function EventSegment({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getEventStart, segmentTime]);
const apiHost = useApiHost();
const { handleTouchStart } = useTapUtils();
const eventThumbnail = useMemo(() => {
return getEventThumbnail(segmentTime);
}, [getEventThumbnail, segmentTime]);
const segmentEvent = useMemo(() => {
return getEvent(segmentTime);
}, [getEvent, segmentTime]);
const timestamp = useMemo(() => new Date(segmentTime * 1000), [segmentTime]);
const segmentKey = useMemo(
@ -252,10 +250,7 @@ export function EventSegment({
className="w-[250px] rounded-lg p-2 md:rounded-2xl"
side="left"
>
<img
className="rounded-lg"
src={`${apiHost}${eventThumbnail.replace("/media/frigate/", "")}`}
/>
{segmentEvent && <ReviewCard event={segmentEvent} />}
</HoverCardContent>
</HoverCardPortal>
</HoverCard>

View File

@ -101,7 +101,7 @@ export default function Step1NameAndType({
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
mode: "onChange",
mode: "onBlur",
defaultValues: {
enabled: true,
name: initialData?.name ?? trigger?.name ?? "",

View File

@ -22,6 +22,7 @@ interface DetailStreamProviderProps {
isDetailMode: boolean;
currentTime: number;
camera: string;
initialSelectedObjectIds?: string[];
}
export function DetailStreamProvider({
@ -29,8 +30,11 @@ export function DetailStreamProvider({
isDetailMode,
currentTime,
camera,
initialSelectedObjectIds,
}: DetailStreamProviderProps) {
const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>([]);
const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>(
() => initialSelectedObjectIds ?? [],
);
const toggleObjectSelection = (id: string | undefined) => {
if (id === undefined) {

View File

@ -191,8 +191,8 @@ export const useEventSegmentUtils = (
[events, getSegmentStart, getSegmentEnd, severityType],
);
const getEventThumbnail = useCallback(
(time: number): string => {
const getEvent = useCallback(
(time: number): ReviewSegment | undefined => {
const matchingEvent = events.find((event) => {
return (
time >= getSegmentStart(event.start_time) &&
@ -201,7 +201,7 @@ export const useEventSegmentUtils = (
);
});
return matchingEvent?.thumb_path ?? "";
return matchingEvent;
},
[events, getSegmentStart, getSegmentEnd, severityType],
);
@ -214,6 +214,6 @@ export const useEventSegmentUtils = (
getReviewed,
shouldShowRoundedCorners,
getEventStart,
getEventThumbnail,
getEvent,
};
};

View File

@ -845,6 +845,7 @@ function FaceAttemptGroup({
selectedItems={selectedFaces}
i18nLibrary="views/faceLibrary"
objectType="person"
noClassificationLabel="details.unknown"
onClick={(data) => {
if (data) {
onClickFaces([data.filename], true);

View File

@ -157,9 +157,11 @@ function MobileMenuItem({
const { t } = useTranslation(["views/settings"]);
return (
<Button
variant="ghost"
className={cn("w-full justify-between pr-2", className)}
<div
className={cn(
"inline-flex h-10 w-full cursor-pointer items-center justify-between whitespace-nowrap rounded-md px-4 py-2 pr-2 text-sm font-medium text-primary-variant disabled:pointer-events-none disabled:opacity-50",
className,
)}
onClick={() => {
onSelect(item.key);
onClose?.();
@ -167,7 +169,7 @@ function MobileMenuItem({
>
<div className="smart-capitalize">{t("menu." + item.key)}</div>
<LuChevronRight className="size-4" />
</Button>
</div>
);
}
@ -273,6 +275,9 @@ export default function Settings() {
} else {
setPageToggle(page as SettingsType);
}
if (isMobile) {
setContentMobileOpen(true);
}
}
// don't clear url params if we're creating a new object mask
return !(searchParams.has("object_mask") || searchParams.has("event_id"));
@ -282,6 +287,9 @@ export default function Settings() {
const cameraNames = cameras.map((c) => c.name);
if (cameraNames.includes(camera)) {
setSelectedCamera(camera);
if (isMobile) {
setContentMobileOpen(true);
}
}
// don't clear url params if we're creating a new object mask or trigger
return !(searchParams.has("object_mask") || searchParams.has("event_id"));

View File

@ -85,6 +85,8 @@ export type StreamConfig = {
quality?: string;
testResult?: TestResult;
userTested?: boolean;
useFfmpeg?: boolean;
restream?: boolean;
};
export type TestResult = {
@ -105,7 +107,6 @@ export type WizardFormData = {
brandTemplate?: CameraBrand;
customUrl?: string;
streams?: StreamConfig[];
restreamIds?: string[];
};
// API Response Types
@ -146,6 +147,7 @@ export type CameraConfigData = {
inputs: {
path: string;
roles: string[];
input_args?: string;
}[];
};
live?: {

View File

@ -306,6 +306,7 @@ export type CustomClassificationModelConfig = {
threshold: number;
object_config?: {
objects: string[];
classification_type: string;
};
state_config?: {
cameras: {

View File

@ -13,7 +13,8 @@ function formatZonesList(zones: string[]): string {
});
}
const allButLast = zones.slice(0, -1).join(", ");
const separatorWithSpace = t("list.separatorWithSpace", { ns: "common" });
const allButLast = zones.slice(0, -1).join(separatorWithSpace);
return t("list.many", {
items: allButLast,
last: zones[zones.length - 1],

View File

@ -43,5 +43,5 @@ export function generateFixedHash(name: string, prefix: string = "id"): string {
* @returns True if the name is valid, false otherwise
*/
export function isValidId(name: string): boolean {
return /^[a-zA-Z0-9_-]+$/.test(name);
return /^[a-zA-Z0-9_-]+$/.test(name) && !/^\d+$/.test(name);
}

View File

@ -1,8 +1,9 @@
import { baseUrl } from "@/api/baseUrl";
import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
import ClassificationModelEditDialog from "@/components/classification/ClassificationModelEditDialog";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
import { Button } from "@/components/ui/button";
import { Button, buttonVariants } from "@/components/ui/button";
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
import useOptimisticState from "@/hooks/use-optimistic-state";
import { cn } from "@/lib/utils";
@ -10,13 +11,34 @@ import {
CustomClassificationModelConfig,
FrigateConfig,
} from "@/types/frigateConfig";
import { useEffect, useMemo, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "react-i18next";
import { FaFolderPlus } from "react-icons/fa";
import { MdModelTraining } from "react-icons/md";
import { LuPencil, LuTrash2 } from "react-icons/lu";
import { FiMoreVertical } from "react-icons/fi";
import useSWR from "swr";
import Heading from "@/components/ui/heading";
import { useOverlayState } from "@/hooks/use-overlay-state";
import axios from "axios";
import { toast } from "sonner";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
} from "@/components/ui/alert-dialog";
import BlurredIconButton from "@/components/button/BlurredIconButton";
const allModelTypes = ["objects", "states"] as const;
type ModelType = (typeof allModelTypes)[number];
@ -126,7 +148,7 @@ export default function ModelSelectionView({
onClick={() => setNewModel(true)}
>
<FaFolderPlus />
Add Classification
{t("button.addClassification")}
</Button>
</div>
</div>
@ -142,6 +164,8 @@ export default function ModelSelectionView({
key={config.name}
config={config}
onClick={() => onClick(config)}
onUpdate={() => refreshConfig()}
onDelete={() => refreshConfig()}
/>
))}
</div>
@ -179,12 +203,62 @@ function NoModelsView({
type ModelCardProps = {
config: CustomClassificationModelConfig;
onClick: () => void;
onUpdate: () => void;
onDelete: () => void;
};
function ModelCard({ config, onClick }: ModelCardProps) {
function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
const { t } = useTranslation(["views/classificationModel"]);
const { data: dataset } = useSWR<{
[id: string]: string[];
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
const [editDialogOpen, setEditDialogOpen] = useState(false);
const handleDelete = useCallback(async () => {
try {
await axios.delete(`classification/${config.name}`);
await axios.put("/config/set", {
requires_restart: 0,
update_topic: `config/classification/custom/${config.name}`,
config_data: {
classification: {
custom: {
[config.name]: "",
},
},
},
});
toast.success(t("toast.success.deletedModel", { count: 1 }), {
position: "top-center",
});
onDelete();
} catch (err) {
const error = err as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
position: "top-center",
});
}
}, [config, onDelete, t]);
const handleDeleteClick = useCallback((e: React.MouseEvent) => {
e.stopPropagation();
setDeleteDialogOpen(true);
}, []);
const handleEditClick = useCallback((e: React.MouseEvent) => {
e.stopPropagation();
setEditDialogOpen(true);
}, []);
const coverImage = useMemo(() => {
if (!dataset) {
return undefined;
@ -204,22 +278,76 @@ function ModelCard({ config, onClick }: ModelCardProps) {
}, [dataset]);
return (
<div
key={config.name}
className={cn(
"relative aspect-square w-full cursor-pointer overflow-hidden rounded-lg",
"outline-transparent duration-500",
)}
onClick={() => onClick()}
>
<img
className="size-full"
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
<>
<ClassificationModelEditDialog
open={editDialogOpen}
model={config}
onClose={() => setEditDialogOpen(false)}
onSuccess={() => onUpdate()}
/>
<ImageShadowOverlay />
<div className="absolute bottom-2 left-3 text-lg smart-capitalize">
{config.name}
<AlertDialog
open={deleteDialogOpen}
onOpenChange={() => setDeleteDialogOpen(!deleteDialogOpen)}
>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>{t("deleteModel.title")}</AlertDialogTitle>
</AlertDialogHeader>
<AlertDialogDescription>
{t("deleteModel.single", { name: config.name })}
</AlertDialogDescription>
<AlertDialogFooter>
<AlertDialogCancel>
{t("button.cancel", { ns: "common" })}
</AlertDialogCancel>
<AlertDialogAction
className={buttonVariants({ variant: "destructive" })}
onClick={handleDelete}
>
{t("button.delete", { ns: "common" })}
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
<div
className={cn(
"relative aspect-square w-full cursor-pointer overflow-hidden rounded-lg",
)}
onClick={onClick}
>
<img
className="size-full"
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
/>
<ImageShadowOverlay lowerClassName="h-[30%] z-0" />
<div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
{config.name}
</div>
<div className="absolute bottom-2 right-2 z-40">
<DropdownMenu>
<DropdownMenuTrigger asChild onClick={(e) => e.stopPropagation()}>
<BlurredIconButton>
<FiMoreVertical className="size-5 text-white" />
</BlurredIconButton>
</DropdownMenuTrigger>
<DropdownMenuContent
align="end"
onClick={(e) => e.stopPropagation()}
>
<DropdownMenuItem onClick={handleEditClick}>
<LuPencil className="mr-2 size-4" />
<span>{t("button.edit", { ns: "common" })}</span>
</DropdownMenuItem>
<DropdownMenuItem onClick={handleDeleteClick}>
<LuTrash2 className="mr-2 size-4" />
<span>{t("button.delete", { ns: "common" })}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
</div>
</div>
</>
);
}

View File

@ -327,31 +327,39 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
</AlertDialog>
<div className="flex flex-row justify-between gap-2 p-2 align-middle">
<div className="flex flex-row items-center justify-center gap-2">
<Button
className="flex items-center gap-2.5 rounded-lg"
aria-label={t("label.back", { ns: "common" })}
onClick={() => navigate(-1)}
>
<IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
{isDesktop && (
<div className="text-primary">
{t("button.back", { ns: "common" })}
</div>
)}
</Button>
<LibrarySelector
pageToggle={pageToggle}
dataset={dataset || {}}
trainImages={trainImages || []}
setPageToggle={setPageToggle}
onDelete={onDelete}
onRename={() => {}}
/>
</div>
{(isDesktop || !selectedImages?.length) && (
<div className="flex flex-row items-center justify-center gap-2">
<Button
className="flex items-center gap-2.5 rounded-lg"
aria-label={t("label.back", { ns: "common" })}
onClick={() => navigate(-1)}
>
<IoMdArrowRoundBack className="size-5 text-secondary-foreground" />
{isDesktop && (
<div className="text-primary">
{t("button.back", { ns: "common" })}
</div>
)}
</Button>
<LibrarySelector
pageToggle={pageToggle}
dataset={dataset || {}}
trainImages={trainImages || []}
setPageToggle={setPageToggle}
onDelete={onDelete}
onRename={() => {}}
/>
</div>
)}
{selectedImages?.length > 0 ? (
<div className="flex items-center justify-center gap-2">
<div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
<div
className={cn(
"flex w-full items-center justify-end gap-2",
isMobileOnly && "justify-between",
)}
>
<div className="flex w-48 items-center justify-center text-sm text-muted-foreground">
<div className="p-1">{`${selectedImages.length} selected`}</div>
<div className="p-1">{"|"}</div>
<div
@ -893,7 +901,7 @@ function ObjectTrainGrid({
// selection
const [selectedEvent, setSelectedEvent] = useState<Event>();
const [dialogTab, setDialogTab] = useState<SearchTab>("details");
const [dialogTab, setDialogTab] = useState<SearchTab>("snapshot");
// handlers
@ -961,6 +969,7 @@ function ObjectTrainGrid({
selectedItems={selectedImages}
i18nLibrary="views/classificationModel"
objectType={model.object_config?.objects?.at(0) ?? "Object"}
noClassificationLabel="details.none"
onClick={(data) => {
if (data) {
onClickImages([data.filename], true);

View File

@ -136,7 +136,7 @@ export default function EventView({
const [selectedReviews, setSelectedReviews] = useState<ReviewSegment[]>([]);
const onSelectReview = useCallback(
(review: ReviewSegment, ctrl: boolean) => {
(review: ReviewSegment, ctrl: boolean, detail: boolean) => {
if (selectedReviews.length > 0 || ctrl) {
const index = selectedReviews.findIndex((r) => r.id === review.id);
@ -156,17 +156,31 @@ export default function EventView({
setSelectedReviews(copy);
}
} else {
// If a specific date is selected in the calendar and it's after the event start,
// use the selected date instead of the event start time
const effectiveStartTime =
timeRange.after > review.start_time
? timeRange.after
: review.start_time;
onOpenRecording({
camera: review.camera,
startTime: review.start_time - REVIEW_PADDING,
startTime: effectiveStartTime - REVIEW_PADDING,
severity: review.severity,
timelineType: detail ? "detail" : undefined,
});
review.has_been_reviewed = true;
markItemAsReviewed(review);
}
},
[selectedReviews, setSelectedReviews, onOpenRecording, markItemAsReviewed],
[
selectedReviews,
setSelectedReviews,
onOpenRecording,
markItemAsReviewed,
timeRange.after,
],
);
const onSelectAllReviews = useCallback(() => {
if (!currentReviewItems || currentReviewItems.length == 0) {
@ -402,7 +416,6 @@ export default function EventView({
onSelectAllReviews={onSelectAllReviews}
setSelectedReviews={setSelectedReviews}
pullLatestData={pullLatestData}
onOpenRecording={onOpenRecording}
/>
)}
{severity == "significant_motion" && (
@ -442,11 +455,14 @@ type DetectionReviewProps = {
loading: boolean;
markItemAsReviewed: (review: ReviewSegment) => void;
markAllItemsAsReviewed: (currentItems: ReviewSegment[]) => void;
onSelectReview: (review: ReviewSegment, ctrl: boolean) => void;
onSelectReview: (
review: ReviewSegment,
ctrl: boolean,
detail: boolean,
) => void;
onSelectAllReviews: () => void;
setSelectedReviews: (reviews: ReviewSegment[]) => void;
pullLatestData: () => void;
onOpenRecording: (recordingInfo: RecordingStartingPoint) => void;
};
function DetectionReview({
contentRef,
@ -466,7 +482,6 @@ function DetectionReview({
onSelectAllReviews,
setSelectedReviews,
pullLatestData,
onOpenRecording,
}: DetectionReviewProps) {
const { t } = useTranslation(["views/events"]);
@ -758,16 +773,7 @@ function DetectionReview({
ctrl: boolean,
detail: boolean,
) => {
if (detail) {
onOpenRecording({
camera: review.camera,
startTime: review.start_time - REVIEW_PADDING,
severity: review.severity,
timelineType: "detail",
});
} else {
onSelectReview(review, ctrl);
}
onSelectReview(review, ctrl, detail);
}}
/>
</div>

View File

@ -970,12 +970,11 @@ function Timeline({
"relative overflow-hidden",
isDesktop
? cn(
"no-scrollbar overflow-y-auto",
timelineType == "timeline"
? "w-[100px] flex-shrink-0"
: timelineType == "detail"
? "min-w-[20rem] max-w-[30%] flex-shrink-0 flex-grow-0 basis-[30rem] md:min-w-[20rem] md:max-w-[25%] lg:min-w-[30rem] lg:max-w-[33%]"
: "w-60 flex-shrink-0",
: "w-80 flex-shrink-0",
)
: cn(
timelineType == "timeline"

View File

@ -214,7 +214,7 @@ export default function SearchView({
// detail
const [searchDetail, setSearchDetail] = useState<SearchResult>();
const [page, setPage] = useState<SearchTab>("details");
const [page, setPage] = useState<SearchTab>("snapshot");
// search interaction
@ -222,7 +222,7 @@ export default function SearchView({
const itemRefs = useRef<(HTMLDivElement | null)[]>([]);
const onSelectSearch = useCallback(
(item: SearchResult, ctrl: boolean, page: SearchTab = "details") => {
(item: SearchResult, ctrl: boolean, page: SearchTab = "snapshot") => {
if (selectedObjects.length > 1 || ctrl) {
const index = selectedObjects.indexOf(item.id);

View File

@ -717,11 +717,11 @@ export default function CameraSettingsView({
<div className="flex w-full flex-row items-center gap-2 pt-2 md:w-[25%]">
<Button
className="flex flex-1"
aria-label={t("button.cancel", { ns: "common" })}
aria-label={t("button.reset", { ns: "common" })}
onClick={onCancel}
type="button"
>
<Trans>button.cancel</Trans>
<Trans>button.reset</Trans>
</Button>
<Button
variant="select"