mirror of
https://github.com/blakeblackshear/frigate.git
synced 2026-04-03 06:40:22 +00:00
Compare commits
16 Commits
f2be3ba486
...
955a5fc5fd
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
955a5fc5fd | ||
|
|
85f7138361 | ||
|
|
fc1cad2872 | ||
|
|
5529432856 | ||
|
|
59963fc47e | ||
|
|
31fa87ce73 | ||
|
|
740c618240 | ||
|
|
4f76b34f44 | ||
|
|
d44340eca6 | ||
|
|
aff82f809c | ||
|
|
1e50d83d06 | ||
|
|
36fb27ef56 | ||
|
|
9937a7cc3d | ||
|
|
7aac6b4f21 | ||
|
|
338b681ed0 | ||
|
|
0344183391 |
@ -2,9 +2,9 @@
|
||||
set -e
|
||||
|
||||
# Download the MxAccl for Frigate github release
|
||||
wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
|
||||
wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
|
||||
unzip /tmp/mxaccl.zip -d /tmp
|
||||
mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
|
||||
mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
|
||||
rm /tmp/mxaccl.zip
|
||||
|
||||
# Install Python dependencies
|
||||
|
||||
@ -5,7 +5,7 @@ aiohttp == 3.12.*
|
||||
starlette == 0.47.*
|
||||
starlette-context == 0.4.*
|
||||
fastapi[standard-no-fastapi-cloud-cli] == 0.116.*
|
||||
uvicorn == 0.35.*
|
||||
uvicorn == 0.38.*
|
||||
slowapi == 0.1.*
|
||||
joserfc == 1.2.*
|
||||
cryptography == 44.0.*
|
||||
@ -56,7 +56,7 @@ pywebpush == 2.0.*
|
||||
# alpr
|
||||
pyclipper == 1.3.*
|
||||
shapely == 2.0.*
|
||||
Levenshtein==0.26.*
|
||||
rapidfuzz==3.12.*
|
||||
# HailoRT Wheels
|
||||
appdirs==1.4.*
|
||||
argcomplete==2.0.*
|
||||
|
||||
@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..."
|
||||
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
|
||||
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
|
||||
|
||||
# Update and install memx-drivers
|
||||
echo "Installing memx-drivers..."
|
||||
# Update and install specific SDK 2.1 packages
|
||||
echo "Installing MemryX SDK 2.1 packages..."
|
||||
sudo apt update
|
||||
sudo apt install -y memx-drivers
|
||||
sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
|
||||
|
||||
# Hold packages to prevent automatic upgrades
|
||||
sudo apt-mark hold memx-drivers memx-accl mxa-manager
|
||||
|
||||
# ARM-specific board setup
|
||||
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
|
||||
@ -37,11 +40,5 @@ fi
|
||||
|
||||
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
|
||||
|
||||
# Install other runtime packages
|
||||
packages=("memx-accl" "mxa-manager")
|
||||
for pkg in "${packages[@]}"; do
|
||||
echo "Installing $pkg..."
|
||||
sudo apt install -y "$pkg"
|
||||
done
|
||||
echo "MemryX SDK 2.1 installation complete!"
|
||||
|
||||
echo "MemryX installation complete!"
|
||||
|
||||
@ -21,7 +21,7 @@ FROM deps AS frigate-tensorrt
|
||||
ARG PIP_BREAK_SYSTEM_PACKAGES
|
||||
|
||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||
pip3 uninstall -y onnxruntime tensorflow-cpu \
|
||||
pip3 uninstall -y onnxruntime \
|
||||
&& pip3 install -U /deps/trt-wheels/*.whl
|
||||
|
||||
COPY --from=rootfs / /
|
||||
|
||||
@ -13,7 +13,6 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
|
||||
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
|
||||
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
|
||||
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
|
||||
tensorflow==2.19.*; platform_machine == 'x86_64'
|
||||
onnx==1.16.*; platform_machine == 'x86_64'
|
||||
onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
|
||||
protobuf==3.20.3; platform_machine == 'x86_64'
|
||||
|
||||
@ -1 +1,2 @@
|
||||
cuda-python == 12.6.*; platform_machine == 'aarch64'
|
||||
numpy == 1.26.*; platform_machine == 'aarch64'
|
||||
|
||||
@ -10,7 +10,6 @@ Object classification allows you to train a custom MobileNetV2 classification mo
|
||||
Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
|
||||
|
||||
Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
|
||||
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
|
||||
|
||||
## Classes
|
||||
|
||||
|
||||
@ -10,7 +10,6 @@ State classification allows you to train a custom MobileNetV2 classification mod
|
||||
State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
|
||||
|
||||
Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
|
||||
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
|
||||
|
||||
## Classes
|
||||
|
||||
|
||||
@ -37,7 +37,6 @@ from frigate.stats.prometheus import get_metrics, update_metrics
|
||||
from frigate.util.builtin import (
|
||||
clean_camera_user_pass,
|
||||
flatten_config_data,
|
||||
get_tz_modifiers,
|
||||
process_config_query_string,
|
||||
update_yaml_file_bulk,
|
||||
)
|
||||
@ -48,6 +47,7 @@ from frigate.util.services import (
|
||||
restart_frigate,
|
||||
vainfo_hwaccel,
|
||||
)
|
||||
from frigate.util.time import get_tz_modifiers
|
||||
from frigate.version import VERSION
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -403,12 +403,13 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
settings,
|
||||
)
|
||||
else:
|
||||
# Handle nested config updates (e.g., config/classification/custom/{name})
|
||||
# Generic handling for global config updates
|
||||
settings = config.get_nested_object(body.update_topic)
|
||||
if settings:
|
||||
request.app.config_publisher.publisher.publish(
|
||||
body.update_topic, settings
|
||||
)
|
||||
|
||||
# Publish None for removal, actual config for add/update
|
||||
request.app.config_publisher.publisher.publish(
|
||||
body.update_topic, settings
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
|
||||
@ -31,7 +31,7 @@ from frigate.api.defs.response.generic_response import GenericResponse
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.camera import DetectConfig
|
||||
from frigate.const import CLIPS_DIR, FACE_DIR
|
||||
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Event
|
||||
from frigate.util.classification import (
|
||||
@ -804,3 +804,46 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
|
||||
content={"success": True, "message": "Example generation completed"},
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/classification/{name}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete a classification model",
|
||||
description="""Deletes a specific classification model and all its associated data.
|
||||
The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
|
||||
)
|
||||
def delete_classification_model(request: Request, name: str):
|
||||
config: FrigateConfig = request.app.frigate_config
|
||||
|
||||
if name not in config.classification.custom:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"{name} is not a known classification model.",
|
||||
}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
# Delete the classification model's data directory in clips
|
||||
data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
|
||||
if os.path.exists(data_dir):
|
||||
shutil.rmtree(data_dir)
|
||||
|
||||
# Delete the classification model's files in model_cache
|
||||
model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
|
||||
if os.path.exists(model_dir):
|
||||
shutil.rmtree(model_dir)
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": True,
|
||||
"message": f"Successfully deleted classification model {name}.",
|
||||
}
|
||||
),
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
@ -57,8 +57,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Event, ReviewSegment, Timeline, Trigger
|
||||
from frigate.track.object_processing import TrackedObject
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.time import get_tz_modifiers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -34,7 +34,7 @@ from frigate.record.export import (
|
||||
PlaybackSourceEnum,
|
||||
RecordingExporter,
|
||||
)
|
||||
from frigate.util.builtin import is_current_hour
|
||||
from frigate.util.time import is_current_hour
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -44,9 +44,9 @@ from frigate.const import (
|
||||
)
|
||||
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
|
||||
from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
from frigate.util.time import get_tz_modifiers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
|
||||
from frigate.review.types import SeverityEnum
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.time import get_dst_transitions, get_tz_modifiers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -329,89 +329,135 @@ async def review_summary(
|
||||
)
|
||||
clauses.append(reduce(operator.or_, label_clauses))
|
||||
|
||||
day_in_seconds = 60 * 60 * 24
|
||||
last_month_query = (
|
||||
# Find the time range of available data
|
||||
time_range_query = (
|
||||
ReviewSegment.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
ReviewSegment.start_time,
|
||||
"unixepoch",
|
||||
hour_modifier,
|
||||
minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_detection"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_detection"),
|
||||
)
|
||||
.left_outer_join(
|
||||
UserReviewStatus,
|
||||
on=(
|
||||
(ReviewSegment.id == UserReviewStatus.review_segment)
|
||||
& (UserReviewStatus.user_id == user_id)
|
||||
),
|
||||
fn.MIN(ReviewSegment.start_time).alias("min_time"),
|
||||
fn.MAX(ReviewSegment.start_time).alias("max_time"),
|
||||
)
|
||||
.where(reduce(operator.and_, clauses) if clauses else True)
|
||||
.group_by(
|
||||
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
|
||||
)
|
||||
.order_by(ReviewSegment.start_time.desc())
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
data = {
|
||||
"last24Hours": last_24_query,
|
||||
}
|
||||
|
||||
for e in last_month_query.dicts().iterator():
|
||||
data[e["day"]] = e
|
||||
# If no data, return early
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=data)
|
||||
|
||||
# Get DST transition periods
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
day_in_seconds = 60 * 60 * 24
|
||||
|
||||
# Query each DST period separately with the correct offset
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
# Calculate hour/minute modifiers for this period
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
# Build clauses including time range for this period
|
||||
period_clauses = clauses.copy()
|
||||
period_clauses.append(
|
||||
(ReviewSegment.start_time >= period_start)
|
||||
& (ReviewSegment.start_time <= period_end)
|
||||
)
|
||||
|
||||
period_query = (
|
||||
ReviewSegment.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
ReviewSegment.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection)
|
||||
& (UserReviewStatus.has_been_reviewed == True),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("reviewed_detection"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.alert),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_alert"),
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(
|
||||
(ReviewSegment.severity == SeverityEnum.detection),
|
||||
1,
|
||||
)
|
||||
],
|
||||
0,
|
||||
)
|
||||
).alias("total_detection"),
|
||||
)
|
||||
.left_outer_join(
|
||||
UserReviewStatus,
|
||||
on=(
|
||||
(ReviewSegment.id == UserReviewStatus.review_segment)
|
||||
& (UserReviewStatus.user_id == user_id)
|
||||
),
|
||||
)
|
||||
.where(reduce(operator.and_, period_clauses))
|
||||
.group_by(
|
||||
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
|
||||
)
|
||||
.order_by(ReviewSegment.start_time.desc())
|
||||
)
|
||||
|
||||
# Merge results from this period
|
||||
for e in period_query.dicts().iterator():
|
||||
day_key = e["day"]
|
||||
if day_key in data:
|
||||
# Merge counts if day already exists (edge case at DST boundary)
|
||||
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
|
||||
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
|
||||
data[day_key]["total_alert"] += e["total_alert"] or 0
|
||||
data[day_key]["total_detection"] += e["total_detection"] or 0
|
||||
else:
|
||||
data[day_key] = e
|
||||
|
||||
return JSONResponse(content=data)
|
||||
|
||||
|
||||
@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from Levenshtein import distance, jaro_winkler
|
||||
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
|
||||
from rapidfuzz.distance import JaroWinkler, Levenshtein
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin:
|
||||
for i, plate in enumerate(plates):
|
||||
merged = False
|
||||
for j, cluster in enumerate(clusters):
|
||||
sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
|
||||
sims = [
|
||||
JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
|
||||
]
|
||||
if len(sims) > 0:
|
||||
avg_sim = sum(sims) / len(sims)
|
||||
if avg_sim >= self.cluster_threshold:
|
||||
@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin:
|
||||
and current_time - data["last_seen"]
|
||||
<= self.config.cameras[camera].lpr.expire_time
|
||||
):
|
||||
similarity = jaro_winkler(data["plate"], top_plate)
|
||||
similarity = JaroWinkler.similarity(data["plate"], top_plate)
|
||||
if similarity >= self.similarity_threshold:
|
||||
plate_id = existing_id
|
||||
logger.debug(
|
||||
@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin:
|
||||
for label, plates_list in self.lpr_config.known_plates.items()
|
||||
if any(
|
||||
re.match(f"^{plate}$", rep_plate)
|
||||
or distance(plate, rep_plate) <= self.lpr_config.match_distance
|
||||
or Levenshtein.distance(plate, rep_plate)
|
||||
<= self.lpr_config.match_distance
|
||||
for plate in plates_list
|
||||
)
|
||||
),
|
||||
|
||||
@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
camera = obj_data["camera"]
|
||||
|
||||
if not self.config.cameras[camera].face_recognition.enabled:
|
||||
logger.debug(f"Face recognition disabled for camera {camera}, skipping")
|
||||
return
|
||||
|
||||
start = datetime.datetime.now().timestamp()
|
||||
@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
person_box = obj_data.get("box")
|
||||
|
||||
if not person_box:
|
||||
logger.debug(f"No person box available for {id}")
|
||||
return
|
||||
|
||||
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
||||
@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
try:
|
||||
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to convert face frame color for {id}: {e}")
|
||||
return
|
||||
else:
|
||||
# don't run for object without attributes
|
||||
@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
|
||||
# no faces detected in this frame
|
||||
if not face:
|
||||
logger.debug(f"No face attributes found for {id}")
|
||||
return
|
||||
|
||||
face_box = face.get("box")
|
||||
@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
res = self.recognizer.classify(face_frame)
|
||||
|
||||
if not res:
|
||||
logger.debug(f"Face recognizer returned no result for {id}")
|
||||
self.__update_metrics(datetime.datetime.now().timestamp() - start)
|
||||
return
|
||||
|
||||
@ -330,6 +335,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
|
||||
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
|
||||
self.recognizer.clear()
|
||||
return {"success": True, "message": "Face classifier cleared."}
|
||||
elif topic == EmbeddingsRequestEnum.recognize_face.value:
|
||||
img = cv2.imdecode(
|
||||
np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
|
||||
|
||||
@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.realtime_processors: list[RealTimeProcessorApi] = []
|
||||
|
||||
if self.config.face_recognition.enabled:
|
||||
logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
|
||||
self.realtime_processors.append(
|
||||
FaceRealTimeProcessor(
|
||||
self.config, self.requestor, self.event_metadata_publisher, metrics
|
||||
)
|
||||
)
|
||||
logger.debug("FaceRealTimeProcessor initialized successfully")
|
||||
|
||||
if self.config.classification.bird.enabled:
|
||||
self.realtime_processors.append(
|
||||
@ -283,44 +285,65 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
logger.info("Exiting embeddings maintenance...")
|
||||
|
||||
def _check_classification_config_updates(self) -> None:
|
||||
"""Check for classification config updates and add new processors."""
|
||||
"""Check for classification config updates and add/remove processors."""
|
||||
topic, model_config = self.classification_config_subscriber.check_for_update()
|
||||
|
||||
if topic and model_config:
|
||||
if topic:
|
||||
model_name = topic.split("/")[-1]
|
||||
self.config.classification.custom[model_name] = model_config
|
||||
|
||||
# Check if processor already exists
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
):
|
||||
if processor.model_config.name == model_name:
|
||||
logger.debug(
|
||||
f"Classification processor for model {model_name} already exists, skipping"
|
||||
if model_config is None:
|
||||
self.realtime_processors = [
|
||||
processor
|
||||
for processor in self.realtime_processors
|
||||
if not (
|
||||
isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
)
|
||||
return
|
||||
and processor.model_config.name == model_name
|
||||
)
|
||||
]
|
||||
|
||||
if model_config.state_config is not None:
|
||||
processor = CustomStateClassificationProcessor(
|
||||
self.config, model_config, self.requestor, self.metrics
|
||||
logger.info(
|
||||
f"Successfully removed classification processor for model: {model_name}"
|
||||
)
|
||||
else:
|
||||
processor = CustomObjectClassificationProcessor(
|
||||
self.config,
|
||||
model_config,
|
||||
self.event_metadata_publisher,
|
||||
self.metrics,
|
||||
)
|
||||
self.config.classification.custom[model_name] = model_config
|
||||
|
||||
self.realtime_processors.append(processor)
|
||||
logger.info(
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
# Check if processor already exists
|
||||
for processor in self.realtime_processors:
|
||||
if isinstance(
|
||||
processor,
|
||||
(
|
||||
CustomStateClassificationProcessor,
|
||||
CustomObjectClassificationProcessor,
|
||||
),
|
||||
):
|
||||
if processor.model_config.name == model_name:
|
||||
logger.debug(
|
||||
f"Classification processor for model {model_name} already exists, skipping"
|
||||
)
|
||||
return
|
||||
|
||||
if model_config.state_config is not None:
|
||||
processor = CustomStateClassificationProcessor(
|
||||
self.config, model_config, self.requestor, self.metrics
|
||||
)
|
||||
else:
|
||||
processor = CustomObjectClassificationProcessor(
|
||||
self.config,
|
||||
model_config,
|
||||
self.event_metadata_publisher,
|
||||
self.metrics,
|
||||
)
|
||||
|
||||
self.realtime_processors.append(processor)
|
||||
logger.info(
|
||||
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
|
||||
)
|
||||
|
||||
def _process_requests(self) -> None:
|
||||
"""Process embeddings requests"""
|
||||
|
||||
@ -114,7 +114,7 @@ Your response MUST be a flat JSON object with:
|
||||
|
||||
## Objects in Scene
|
||||
|
||||
Each line represents a detection state, not necessarily unique individuals. Objects with names in parentheses (e.g., "Name (person)") are verified identities. Objects without names (e.g., "Person") are detected but not identified.
|
||||
Each line represents a detection state, not necessarily unique individuals. Parentheses indicate object type or category, use only the name/label in your response, not the parentheses.
|
||||
|
||||
**CRITICAL: When you see both recognized and unrecognized entries of the same type (e.g., "Joe (person)" and "Person"), visually count how many distinct people/objects you actually see based on appearance and clothing. If you observe only ONE person throughout the sequence, use ONLY the recognized name (e.g., "Joe"). The same person may be recognized in some frames but not others. Only describe both if you visually see MULTIPLE distinct people with clearly different appearances.**
|
||||
|
||||
|
||||
@ -18,6 +18,7 @@ class OpenAIClient(GenAIClient):
|
||||
"""Generative AI client for Frigate using OpenAI."""
|
||||
|
||||
provider: OpenAI
|
||||
context_size: Optional[int] = None
|
||||
|
||||
def _init_provider(self):
|
||||
"""Initialize the client."""
|
||||
@ -69,5 +70,33 @@ class OpenAIClient(GenAIClient):
|
||||
|
||||
def get_context_size(self) -> int:
|
||||
"""Get the context window size for OpenAI."""
|
||||
# OpenAI GPT-4 Vision models have 128K token context window
|
||||
return 128000
|
||||
if self.context_size is not None:
|
||||
return self.context_size
|
||||
|
||||
try:
|
||||
models = self.provider.models.list()
|
||||
for model in models.data:
|
||||
if model.id == self.genai_config.model:
|
||||
if hasattr(model, "max_model_len") and model.max_model_len:
|
||||
self.context_size = model.max_model_len
|
||||
logger.debug(
|
||||
f"Retrieved context size {self.context_size} for model {self.genai_config.model}"
|
||||
)
|
||||
return self.context_size
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
f"Failed to fetch model context size from API: {e}, using default"
|
||||
)
|
||||
|
||||
# Default to 128K for ChatGPT models, 8K for others
|
||||
model_name = self.genai_config.model.lower()
|
||||
if "gpt" in model_name:
|
||||
self.context_size = 128000
|
||||
else:
|
||||
self.context_size = 8192
|
||||
|
||||
logger.debug(
|
||||
f"Using default context size {self.context_size} for model {self.genai_config.model}"
|
||||
)
|
||||
return self.context_size
|
||||
|
||||
@ -14,7 +14,8 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
|
||||
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
|
||||
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
|
||||
from frigate.record.util import remove_empty_directories, sync_recordings
|
||||
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
|
||||
from frigate.util.builtin import clear_and_unlink
|
||||
from frigate.util.time import get_tomorrow_at_time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
|
||||
parse_preset_hardware_acceleration_encode,
|
||||
)
|
||||
from frigate.models import Export, Previews, Recordings
|
||||
from frigate.util.builtin import is_current_hour
|
||||
from frigate.util.time import is_current_hour
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -15,12 +15,9 @@ from collections.abc import Mapping
|
||||
from multiprocessing.sharedctypes import Synchronized
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Tuple, Union
|
||||
from zoneinfo import ZoneInfoNotFoundError
|
||||
|
||||
import numpy as np
|
||||
import pytz
|
||||
from ruamel.yaml import YAML
|
||||
from tzlocal import get_localzone
|
||||
|
||||
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
|
||||
|
||||
@ -157,17 +154,6 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
|
||||
return labels
|
||||
|
||||
|
||||
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
|
||||
seconds_offset = (
|
||||
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
|
||||
)
|
||||
hours_offset = int(seconds_offset / 60 / 60)
|
||||
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
|
||||
hour_modifier = f"{hours_offset} hour"
|
||||
minute_modifier = f"{minutes_offset} minute"
|
||||
return hour_modifier, minute_modifier, seconds_offset
|
||||
|
||||
|
||||
def to_relative_box(
|
||||
width: int, height: int, box: Tuple[int, int, int, int]
|
||||
) -> Tuple[int | float, int | float, int | float, int | float]:
|
||||
@ -298,34 +284,6 @@ def find_by_key(dictionary, target_key):
|
||||
return None
|
||||
|
||||
|
||||
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
|
||||
"""Returns the datetime of the following day at 2am."""
|
||||
try:
|
||||
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
|
||||
except ZoneInfoNotFoundError:
|
||||
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
|
||||
days=1
|
||||
)
|
||||
logger.warning(
|
||||
"Using utc for maintenance due to missing or incorrect timezone set"
|
||||
)
|
||||
|
||||
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
|
||||
datetime.timezone.utc
|
||||
)
|
||||
|
||||
|
||||
def is_current_hour(timestamp: int) -> bool:
|
||||
"""Returns if timestamp is in the current UTC hour."""
|
||||
start_of_next_hour = (
|
||||
datetime.datetime.now(datetime.timezone.utc).replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
+ datetime.timedelta(hours=1)
|
||||
).timestamp()
|
||||
return timestamp < start_of_next_hour
|
||||
|
||||
|
||||
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
|
||||
"""clear file then unlink to avoid space retained by file descriptors."""
|
||||
if not missing_ok and not file.exists():
|
||||
|
||||
@ -384,10 +384,10 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
|
||||
new_object_config["genai"] = {}
|
||||
|
||||
for key in global_genai.keys():
|
||||
if key not in ["enabled", "model", "provider", "base_url", "api_key"]:
|
||||
new_object_config["genai"][key] = global_genai[key]
|
||||
else:
|
||||
if key in ["model", "provider", "base_url", "api_key"]:
|
||||
new_genai_config[key] = global_genai[key]
|
||||
else:
|
||||
new_object_config["genai"][key] = global_genai[key]
|
||||
|
||||
config["genai"] = new_genai_config
|
||||
|
||||
|
||||
100
frigate/util/time.py
Normal file
100
frigate/util/time.py
Normal file
@ -0,0 +1,100 @@
|
||||
"""Time utilities."""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
from typing import Tuple
|
||||
from zoneinfo import ZoneInfoNotFoundError
|
||||
|
||||
import pytz
|
||||
from tzlocal import get_localzone
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
|
||||
seconds_offset = (
|
||||
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
|
||||
)
|
||||
hours_offset = int(seconds_offset / 60 / 60)
|
||||
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
|
||||
hour_modifier = f"{hours_offset} hour"
|
||||
minute_modifier = f"{minutes_offset} minute"
|
||||
return hour_modifier, minute_modifier, seconds_offset
|
||||
|
||||
|
||||
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
|
||||
"""Returns the datetime of the following day at 2am."""
|
||||
try:
|
||||
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
|
||||
except ZoneInfoNotFoundError:
|
||||
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
|
||||
days=1
|
||||
)
|
||||
logger.warning(
|
||||
"Using utc for maintenance due to missing or incorrect timezone set"
|
||||
)
|
||||
|
||||
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
|
||||
datetime.timezone.utc
|
||||
)
|
||||
|
||||
|
||||
def is_current_hour(timestamp: int) -> bool:
|
||||
"""Returns if timestamp is in the current UTC hour."""
|
||||
start_of_next_hour = (
|
||||
datetime.datetime.now(datetime.timezone.utc).replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
)
|
||||
+ datetime.timedelta(hours=1)
|
||||
).timestamp()
|
||||
return timestamp < start_of_next_hour
|
||||
|
||||
|
||||
def get_dst_transitions(
    tz_name: str, start_time: float, end_time: float
) -> list[tuple[float, float, float]]:
    """
    Find DST transition points and return time periods with consistent offsets.

    Args:
        tz_name: Timezone name (e.g., 'America/New_York')
        start_time: Start timestamp (UTC)
        end_time: End timestamp (UTC)

    Returns:
        List of (period_start, period_end, seconds_offset) tuples representing
        continuous periods with the same UTC offset. Transitions are sampled
        at daily granularity, so period boundaries are accurate to within one
        day of the actual transition.
    """
    try:
        # stdlib zoneinfo replaces the deprecated pytz; the invalid-timezone
        # fallback behavior is unchanged.
        tz = ZoneInfo(tz_name)
    except (ZoneInfoNotFoundError, ValueError):
        # If timezone is invalid, return single period with no offset
        return [(start_time, end_time, 0)]

    def _offset_at(ts: float) -> float:
        # UTC offset (seconds) of tz at the given epoch timestamp.
        # fromtimestamp(..., tz=utc) replaces the deprecated utcfromtimestamp.
        return (
            datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)
            .astimezone(tz)
            .utcoffset()
            .total_seconds()
        )

    periods = []
    period_start = start_time
    prev_offset = _offset_at(start_time)

    # Check each day for offset changes
    current = start_time
    while current <= end_time:
        current_offset = _offset_at(current)

        if current_offset != prev_offset:
            # Found a transition - close previous period
            periods.append((period_start, current, prev_offset))
            period_start = current
            prev_offset = current_offset

        current += 86400  # Check daily

    # Add final period
    periods.append((period_start, end_time, prev_offset))

    return periods
|
||||
@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
|
||||
from frigate.track import ObjectTracker
|
||||
from frigate.track.norfair_tracker import NorfairTracker
|
||||
from frigate.track.tracked_object import TrackedObjectAttribute
|
||||
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
|
||||
from frigate.util.builtin import EventsPerSecond
|
||||
from frigate.util.image import (
|
||||
FrameManager,
|
||||
SharedMemoryFrameManager,
|
||||
@ -53,6 +53,7 @@ from frigate.util.object import (
|
||||
reduce_detections,
|
||||
)
|
||||
from frigate.util.process import FrigateProcess
|
||||
from frigate.util.time import get_tomorrow_at_time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -100,7 +100,8 @@
|
||||
},
|
||||
"list": {
|
||||
"two": "{{0}} and {{1}}",
|
||||
"many": "{{items}}, and {{last}}"
|
||||
"many": "{{items}}, and {{last}}",
|
||||
"separatorWithSpace": ", "
|
||||
},
|
||||
"field": {
|
||||
"optional": "Optional",
|
||||
|
||||
@ -5,12 +5,15 @@
|
||||
"renameCategory": "Rename Class",
|
||||
"deleteCategory": "Delete Class",
|
||||
"deleteImages": "Delete Images",
|
||||
"trainModel": "Train Model"
|
||||
"trainModel": "Train Model",
|
||||
"addClassification": "Add Classification",
|
||||
"deleteModels": "Delete Models"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"deletedCategory": "Deleted Class",
|
||||
"deletedImage": "Deleted Images",
|
||||
"deletedModel": "Successfully deleted {{count}} model(s)",
|
||||
"categorizedImage": "Successfully Classified Image",
|
||||
"trainedModel": "Successfully trained model.",
|
||||
"trainingModel": "Successfully started model training."
|
||||
@ -18,6 +21,7 @@
|
||||
"error": {
|
||||
"deleteImageFailed": "Failed to delete: {{errorMessage}}",
|
||||
"deleteCategoryFailed": "Failed to delete class: {{errorMessage}}",
|
||||
"deleteModelFailed": "Failed to delete model: {{errorMessage}}",
|
||||
"categorizeFailed": "Failed to categorize image: {{errorMessage}}",
|
||||
"trainingFailed": "Failed to start model training: {{errorMessage}}"
|
||||
}
|
||||
@ -26,6 +30,11 @@
|
||||
"title": "Delete Class",
|
||||
"desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
|
||||
},
|
||||
"deleteModel": {
|
||||
"title": "Delete Classification Model",
|
||||
"single": "Are you sure you want to delete {{name}}? This will permanently delete all associated data including images and training data. This action cannot be undone.",
|
||||
"desc": "Are you sure you want to delete {{count}} model(s)? This will permanently delete all associated data including images and training data. This action cannot be undone."
|
||||
},
|
||||
"deleteDatasetImages": {
|
||||
"title": "Delete Dataset Images",
|
||||
"desc": "Are you sure you want to delete {{count}} images from {{dataset}}? This action cannot be undone and will require re-training the model."
|
||||
@ -52,6 +61,10 @@
|
||||
},
|
||||
"categorizeImageAs": "Classify Image As:",
|
||||
"categorizeImage": "Classify Image",
|
||||
"menu": {
|
||||
"objects": "Objects",
|
||||
"states": "States"
|
||||
},
|
||||
"noModels": {
|
||||
"object": {
|
||||
"title": "No Object Classification Models",
|
||||
@ -86,6 +99,7 @@
|
||||
"classificationSubLabel": "Sub Label",
|
||||
"classificationAttribute": "Attribute",
|
||||
"classes": "Classes",
|
||||
"states": "States",
|
||||
"classesTip": "Learn about classes",
|
||||
"classesStateDesc": "Define the different states your camera area can be in. For example: 'open' and 'closed' for a garage door.",
|
||||
"classesObjectDesc": "Define the different categories to classify detected objects into. For example: 'delivery_person', 'resident', 'stranger' for person classification.",
|
||||
|
||||
@ -33,6 +33,7 @@
|
||||
"type": {
|
||||
"details": "details",
|
||||
"snapshot": "snapshot",
|
||||
"thumbnail": "thumbnail",
|
||||
"video": "video",
|
||||
"object_lifecycle": "object lifecycle"
|
||||
},
|
||||
@ -41,7 +42,7 @@
|
||||
"noImageFound": "No image found for this timestamp.",
|
||||
"createObjectMask": "Create Object Mask",
|
||||
"adjustAnnotationSettings": "Adjust annotation settings",
|
||||
"scrollViewTips": "Scroll to view the significant moments of this object's lifecycle.",
|
||||
"scrollViewTips": "Click to view the significant moments of this object's lifecycle.",
|
||||
"autoTrackingTips": "Bounding box positions will be inaccurate for autotracking cameras.",
|
||||
"count": "{{first}} of {{second}}",
|
||||
"trackedPoint": "Tracked Point",
|
||||
|
||||
@ -271,6 +271,8 @@
|
||||
"disconnectStream": "Disconnect",
|
||||
"estimatedBandwidth": "Estimated Bandwidth",
|
||||
"roles": "Roles",
|
||||
"ffmpegModule": "Use stream compatibility mode",
|
||||
"ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
|
||||
"none": "None",
|
||||
"error": "Error",
|
||||
"streamValidated": "Stream {{number}} validated successfully",
|
||||
|
||||
@ -394,7 +394,9 @@ export default function Step1NameAndDefine({
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-1">
|
||||
<FormLabel className="text-primary-variant">
|
||||
{t("wizard.step1.classes")}
|
||||
{watchedModelType === "state"
|
||||
? t("wizard.step1.states")
|
||||
: t("wizard.step1.classes")}
|
||||
</FormLabel>
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
|
||||
@ -317,6 +317,21 @@ export default function Step3ChooseExamples({
|
||||
return unclassifiedImages.length === 0;
|
||||
}, [unclassifiedImages]);
|
||||
|
||||
const handleBack = useCallback(() => {
|
||||
if (currentClassIndex > 0) {
|
||||
const previousClass = allClasses[currentClassIndex - 1];
|
||||
setCurrentClassIndex((prev) => prev - 1);
|
||||
|
||||
// Restore selections for the previous class
|
||||
const previousSelections = Object.entries(imageClassifications)
|
||||
.filter(([_, className]) => className === previousClass)
|
||||
.map(([imageName, _]) => imageName);
|
||||
setSelectedImages(new Set(previousSelections));
|
||||
} else {
|
||||
onBack();
|
||||
}
|
||||
}, [currentClassIndex, allClasses, imageClassifications, onBack]);
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-6">
|
||||
{isTraining ? (
|
||||
@ -420,7 +435,7 @@ export default function Step3ChooseExamples({
|
||||
|
||||
{!isTraining && (
|
||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||
<Button type="button" onClick={onBack} className="sm:flex-1">
|
||||
<Button type="button" onClick={handleBack} className="sm:flex-1">
|
||||
{t("button.back", { ns: "common" })}
|
||||
</Button>
|
||||
<Button
|
||||
|
||||
@ -348,6 +348,26 @@ export function GeneralFilterContent({
|
||||
onClose,
|
||||
}: GeneralFilterContentProps) {
|
||||
const { t } = useTranslation(["components/filter"]);
|
||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
|
||||
const allAudioListenLabels = useMemo<string[]>(() => {
|
||||
if (!config) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const labels = new Set<string>();
|
||||
Object.values(config.cameras).forEach((camera) => {
|
||||
if (camera?.audio?.enabled) {
|
||||
camera.audio.listen.forEach((label) => {
|
||||
labels.add(label);
|
||||
});
|
||||
}
|
||||
});
|
||||
return [...labels].sort();
|
||||
}, [config]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="overflow-x-hidden">
|
||||
@ -373,7 +393,10 @@ export function GeneralFilterContent({
|
||||
{allLabels.map((item) => (
|
||||
<FilterSwitch
|
||||
key={item}
|
||||
label={getTranslatedLabel(item)}
|
||||
label={getTranslatedLabel(
|
||||
item,
|
||||
allAudioListenLabels.includes(item) ? "audio" : "object",
|
||||
)}
|
||||
isChecked={currentLabels?.includes(item) ?? false}
|
||||
onCheckedChange={(isChecked) => {
|
||||
if (isChecked) {
|
||||
|
||||
@ -13,6 +13,9 @@ import { cn } from "@/lib/utils";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { Event } from "@/types/event";
|
||||
|
||||
// Use a small tolerance (10ms) for browsers with seek precision by-design issues
|
||||
const TOLERANCE = 0.01;
|
||||
|
||||
type ObjectTrackOverlayProps = {
|
||||
camera: string;
|
||||
showBoundingBoxes?: boolean;
|
||||
@ -55,6 +58,47 @@ export default function ObjectTrackOverlay({
|
||||
|
||||
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
|
||||
|
||||
const {
|
||||
pathStroke,
|
||||
pointRadius,
|
||||
pointStroke,
|
||||
zoneStroke,
|
||||
boxStroke,
|
||||
highlightRadius,
|
||||
} = useMemo(() => {
|
||||
const BASE_WIDTH = 1280;
|
||||
const BASE_HEIGHT = 720;
|
||||
const BASE_PATH_STROKE = 5;
|
||||
const BASE_POINT_RADIUS = 7;
|
||||
const BASE_POINT_STROKE = 3;
|
||||
const BASE_ZONE_STROKE = 5;
|
||||
const BASE_BOX_STROKE = 5;
|
||||
const BASE_HIGHLIGHT_RADIUS = 5;
|
||||
|
||||
const scale = Math.sqrt(
|
||||
(videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
|
||||
);
|
||||
|
||||
const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
|
||||
const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
|
||||
const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
|
||||
const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
|
||||
const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
|
||||
const highlightRadius = Math.max(
|
||||
2,
|
||||
Math.round(BASE_HIGHLIGHT_RADIUS * scale),
|
||||
);
|
||||
|
||||
return {
|
||||
pathStroke,
|
||||
pointRadius,
|
||||
pointStroke,
|
||||
zoneStroke,
|
||||
boxStroke,
|
||||
highlightRadius,
|
||||
};
|
||||
}, [videoWidth, videoHeight]);
|
||||
|
||||
// Fetch all event data in a single request (CSV ids)
|
||||
const { data: eventsData } = useSWR<Event[]>(
|
||||
selectedObjectIds.length > 0
|
||||
@ -166,41 +210,50 @@ export default function ObjectTrackOverlay({
|
||||
}) || [];
|
||||
|
||||
// show full path once current time has reached the object's start time
|
||||
const combinedPoints = [...savedPathPoints, ...eventSequencePoints]
|
||||
.sort((a, b) => a.timestamp - b.timestamp)
|
||||
.filter(
|
||||
(point) =>
|
||||
currentTime >= (eventData?.start_time ?? 0) &&
|
||||
point.timestamp >= (eventData?.start_time ?? 0) &&
|
||||
point.timestamp <= (eventData?.end_time ?? Infinity),
|
||||
);
|
||||
// event.start_time is in DETECT stream time, so convert it to record stream time for comparison
|
||||
const eventStartTimeRecord =
|
||||
(eventData?.start_time ?? 0) + annotationOffset / 1000;
|
||||
|
||||
const allPoints = [...savedPathPoints, ...eventSequencePoints].sort(
|
||||
(a, b) => a.timestamp - b.timestamp,
|
||||
);
|
||||
const combinedPoints = allPoints.filter(
|
||||
(point) =>
|
||||
currentTime >= eventStartTimeRecord - TOLERANCE &&
|
||||
point.timestamp <= effectiveCurrentTime + TOLERANCE,
|
||||
);
|
||||
|
||||
// Get color for this object
|
||||
const label = eventData?.label || "unknown";
|
||||
const color = getObjectColor(label, objectId);
|
||||
|
||||
// Get current zones
|
||||
// zones (with tolerance for browsers with seek precision by-design issues)
|
||||
const currentZones =
|
||||
timelineData
|
||||
?.filter(
|
||||
(event: TrackingDetailsSequence) =>
|
||||
event.timestamp <= effectiveCurrentTime,
|
||||
event.timestamp <= effectiveCurrentTime + TOLERANCE,
|
||||
)
|
||||
.sort(
|
||||
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
||||
b.timestamp - a.timestamp,
|
||||
)[0]?.data?.zones || [];
|
||||
|
||||
// Get current bounding box
|
||||
const currentBox = timelineData
|
||||
?.filter(
|
||||
(event: TrackingDetailsSequence) =>
|
||||
event.timestamp <= effectiveCurrentTime && event.data.box,
|
||||
)
|
||||
// bounding box - only show if there's a timeline event at/near the current time with a box
|
||||
// Search all timeline events (not just those before current time) to find one matching the seek position
|
||||
const nearbyTimelineEvent = timelineData
|
||||
?.filter((event: TrackingDetailsSequence) => event.data.box)
|
||||
.sort(
|
||||
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
|
||||
b.timestamp - a.timestamp,
|
||||
)[0]?.data?.box;
|
||||
Math.abs(a.timestamp - effectiveCurrentTime) -
|
||||
Math.abs(b.timestamp - effectiveCurrentTime),
|
||||
)
|
||||
.find(
|
||||
(event: TrackingDetailsSequence) =>
|
||||
Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
|
||||
);
|
||||
|
||||
const currentBox = nearbyTimelineEvent?.data?.box;
|
||||
|
||||
return {
|
||||
objectId,
|
||||
@ -221,6 +274,7 @@ export default function ObjectTrackOverlay({
|
||||
getObjectColor,
|
||||
config,
|
||||
camera,
|
||||
annotationOffset,
|
||||
]);
|
||||
|
||||
// Collect all zones across all objects
|
||||
@ -274,9 +328,10 @@ export default function ObjectTrackOverlay({
|
||||
|
||||
const handlePointClick = useCallback(
|
||||
(timestamp: number) => {
|
||||
onSeekToTime?.(timestamp, false);
|
||||
// Convert detect stream timestamp to record stream timestamp before seeking
|
||||
onSeekToTime?.(timestamp + annotationOffset / 1000, false);
|
||||
},
|
||||
[onSeekToTime],
|
||||
[onSeekToTime, annotationOffset],
|
||||
);
|
||||
|
||||
const zonePolygons = useMemo(() => {
|
||||
@ -324,7 +379,7 @@ export default function ObjectTrackOverlay({
|
||||
points={zone.points}
|
||||
fill={zone.fill}
|
||||
stroke={zone.stroke}
|
||||
strokeWidth="5"
|
||||
strokeWidth={zoneStroke}
|
||||
opacity="0.7"
|
||||
/>
|
||||
))}
|
||||
@ -344,7 +399,7 @@ export default function ObjectTrackOverlay({
|
||||
d={generateStraightPath(absolutePositions)}
|
||||
fill="none"
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={pathStroke}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
@ -356,13 +411,13 @@ export default function ObjectTrackOverlay({
|
||||
<circle
|
||||
cx={pos.x}
|
||||
cy={pos.y}
|
||||
r="7"
|
||||
r={pointRadius}
|
||||
fill={getPointColor(
|
||||
objData.color,
|
||||
pos.lifecycle_item?.class_type,
|
||||
)}
|
||||
stroke="white"
|
||||
strokeWidth="3"
|
||||
strokeWidth={pointStroke}
|
||||
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
|
||||
onClick={() => handlePointClick(pos.timestamp)}
|
||||
/>
|
||||
@ -391,7 +446,7 @@ export default function ObjectTrackOverlay({
|
||||
height={objData.currentBox[3] * videoHeight}
|
||||
fill="none"
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={boxStroke}
|
||||
opacity="0.9"
|
||||
/>
|
||||
<circle
|
||||
@ -403,10 +458,10 @@ export default function ObjectTrackOverlay({
|
||||
(objData.currentBox[1] + objData.currentBox[3]) *
|
||||
videoHeight
|
||||
}
|
||||
r="5"
|
||||
r={highlightRadius}
|
||||
fill="rgb(255, 255, 0)" // yellow highlight
|
||||
stroke={objData.color}
|
||||
strokeWidth="5"
|
||||
strokeWidth={boxStroke}
|
||||
opacity="1"
|
||||
/>
|
||||
</g>
|
||||
|
||||
@ -91,8 +91,8 @@ export default function AnnotationOffsetSlider({ className }: Props) {
|
||||
<div className="w-full flex-1 landscape:flex">
|
||||
<Slider
|
||||
value={[annotationOffset]}
|
||||
min={-1500}
|
||||
max={1500}
|
||||
min={-2500}
|
||||
max={2500}
|
||||
step={50}
|
||||
onValueChange={handleChange}
|
||||
/>
|
||||
|
||||
@ -1,577 +0,0 @@
|
||||
import { isDesktop, isIOS, isMobile } from "react-device-detect";
|
||||
import {
|
||||
Sheet,
|
||||
SheetContent,
|
||||
SheetDescription,
|
||||
SheetHeader,
|
||||
SheetTitle,
|
||||
} from "../../ui/sheet";
|
||||
import useSWR from "swr";
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import { useFormattedTimestamp } from "@/hooks/use-date-utils";
|
||||
import { getIconForLabel } from "@/utils/iconUtil";
|
||||
import { useApiHost } from "@/api";
|
||||
import {
|
||||
ReviewDetailPaneType,
|
||||
ReviewSegment,
|
||||
ThreatLevel,
|
||||
} from "@/types/review";
|
||||
import { Event } from "@/types/event";
|
||||
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { FrigatePlusDialog } from "../dialog/FrigatePlusDialog";
|
||||
import TrackingDetails from "./TrackingDetails";
|
||||
import Chip from "@/components/indicators/Chip";
|
||||
import { FaDownload, FaImages, FaShareAlt } from "react-icons/fa";
|
||||
import FrigatePlusIcon from "@/components/icons/FrigatePlusIcon";
|
||||
import { FaArrowsRotate } from "react-icons/fa6";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
import { useNavigate } from "react-router-dom";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { baseUrl } from "@/api/baseUrl";
|
||||
import { shareOrCopy } from "@/utils/browserUtil";
|
||||
import {
|
||||
MobilePage,
|
||||
MobilePageContent,
|
||||
MobilePageDescription,
|
||||
MobilePageHeader,
|
||||
MobilePageTitle,
|
||||
} from "@/components/mobile/MobilePage";
|
||||
import { DownloadVideoButton } from "@/components/button/DownloadVideoButton";
|
||||
import { TooltipPortal } from "@radix-ui/react-tooltip";
|
||||
import { LuSearch } from "react-icons/lu";
|
||||
import useKeyboardListener from "@/hooks/use-keyboard-listener";
|
||||
import { Trans, useTranslation } from "react-i18next";
|
||||
import { getTranslatedLabel } from "@/utils/i18n";
|
||||
import { CameraNameLabel } from "@/components/camera/CameraNameLabel";
|
||||
|
||||
type ReviewDetailDialogProps = {
|
||||
review?: ReviewSegment;
|
||||
setReview: (review: ReviewSegment | undefined) => void;
|
||||
};
|
||||
export default function ReviewDetailDialog({
|
||||
review,
|
||||
setReview,
|
||||
}: ReviewDetailDialogProps) {
|
||||
const { t } = useTranslation(["views/explore"]);
|
||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
|
||||
const navigate = useNavigate();
|
||||
|
||||
// upload
|
||||
|
||||
const [upload, setUpload] = useState<Event>();
|
||||
|
||||
// data
|
||||
|
||||
const { data: events } = useSWR<Event[]>(
|
||||
review ? ["event_ids", { ids: review.data.detections.join(",") }] : null,
|
||||
);
|
||||
|
||||
const aiAnalysis = useMemo(() => review?.data?.metadata, [review]);
|
||||
|
||||
const aiThreatLevel = useMemo(() => {
|
||||
if (
|
||||
!aiAnalysis ||
|
||||
(!aiAnalysis.potential_threat_level && !aiAnalysis.other_concerns)
|
||||
) {
|
||||
return "None";
|
||||
}
|
||||
|
||||
let concerns = "";
|
||||
switch (aiAnalysis.potential_threat_level) {
|
||||
case ThreatLevel.SUSPICIOUS:
|
||||
concerns = `• ${t("suspiciousActivity", { ns: "views/events" })}\n`;
|
||||
break;
|
||||
case ThreatLevel.DANGER:
|
||||
concerns = `• ${t("threateningActivity", { ns: "views/events" })}\n`;
|
||||
break;
|
||||
}
|
||||
|
||||
(aiAnalysis.other_concerns ?? []).forEach((c) => {
|
||||
concerns += `• ${c}\n`;
|
||||
});
|
||||
|
||||
return concerns || "None";
|
||||
}, [aiAnalysis, t]);
|
||||
|
||||
const hasMismatch = useMemo(() => {
|
||||
if (!review || !events) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return events.length != review?.data.detections.length;
|
||||
}, [review, events]);
|
||||
|
||||
const missingObjects = useMemo(() => {
|
||||
if (!review || !events) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const detectedIds = review.data.detections;
|
||||
const missing = Array.from(
|
||||
new Set(
|
||||
events
|
||||
.filter((event) => !detectedIds.includes(event.id))
|
||||
.map((event) => event.label),
|
||||
),
|
||||
);
|
||||
|
||||
return missing;
|
||||
}, [review, events]);
|
||||
|
||||
const formattedDate = useFormattedTimestamp(
|
||||
review?.start_time ?? 0,
|
||||
config?.ui.time_format == "24hour"
|
||||
? t("time.formattedTimestampMonthDayYearHourMinute.24hour", {
|
||||
ns: "common",
|
||||
})
|
||||
: t("time.formattedTimestampMonthDayYearHourMinute.12hour", {
|
||||
ns: "common",
|
||||
}),
|
||||
config?.ui.timezone,
|
||||
);
|
||||
|
||||
// content
|
||||
|
||||
const [selectedEvent, setSelectedEvent] = useState<Event>();
|
||||
const [pane, setPane] = useState<ReviewDetailPaneType>("overview");
|
||||
|
||||
// dialog and mobile page
|
||||
|
||||
const [isOpen, setIsOpen] = useState(review != undefined);
|
||||
|
||||
const handleOpenChange = useCallback(
|
||||
(open: boolean) => {
|
||||
setIsOpen(open);
|
||||
if (!open) {
|
||||
// short timeout to allow the mobile page animation
|
||||
// to complete before updating the state
|
||||
setTimeout(() => {
|
||||
setReview(undefined);
|
||||
setSelectedEvent(undefined);
|
||||
setPane("overview");
|
||||
}, 300);
|
||||
}
|
||||
},
|
||||
[setReview, setIsOpen],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
setIsOpen(review != undefined);
|
||||
// we know that these deps are correct
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [review]);
|
||||
|
||||
// keyboard listener
|
||||
|
||||
useKeyboardListener(["Esc"], (key, modifiers) => {
|
||||
if (key == "Esc" && modifiers.down && !modifiers.repeat) {
|
||||
setIsOpen(false);
|
||||
}
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
const Overlay = isDesktop ? Sheet : MobilePage;
|
||||
const Content = isDesktop ? SheetContent : MobilePageContent;
|
||||
const Header = isDesktop ? SheetHeader : MobilePageHeader;
|
||||
const Title = isDesktop ? SheetTitle : MobilePageTitle;
|
||||
const Description = isDesktop ? SheetDescription : MobilePageDescription;
|
||||
|
||||
if (!review) {
|
||||
return;
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<Overlay
|
||||
open={isOpen ?? false}
|
||||
onOpenChange={handleOpenChange}
|
||||
enableHistoryBack={true}
|
||||
>
|
||||
<FrigatePlusDialog
|
||||
upload={upload}
|
||||
onClose={() => setUpload(undefined)}
|
||||
onEventUploaded={() => {
|
||||
if (upload) {
|
||||
upload.plus_id = "new_upload";
|
||||
}
|
||||
}}
|
||||
/>
|
||||
|
||||
<Content
|
||||
className={cn(
|
||||
"scrollbar-container overflow-y-auto",
|
||||
isDesktop && pane == "overview"
|
||||
? "sm:max-w-xl"
|
||||
: "pt-2 sm:max-w-4xl",
|
||||
isMobile && "px-4",
|
||||
)}
|
||||
>
|
||||
<span tabIndex={0} className="sr-only" />
|
||||
{pane == "overview" && (
|
||||
<Header className="justify-center">
|
||||
<Title>{t("details.item.title")}</Title>
|
||||
<Description className="sr-only">
|
||||
{t("details.item.desc")}
|
||||
</Description>
|
||||
<div
|
||||
className={cn(
|
||||
"absolute flex gap-2 lg:flex-col",
|
||||
isDesktop && "right-1 top-8",
|
||||
isMobile && "right-0 top-3",
|
||||
)}
|
||||
>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
aria-label={t("details.item.button.share")}
|
||||
size="sm"
|
||||
onClick={() =>
|
||||
shareOrCopy(`${baseUrl}review?id=${review.id}`)
|
||||
}
|
||||
>
|
||||
<FaShareAlt className="size-4 text-secondary-foreground" />
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipPortal>
|
||||
<TooltipContent>
|
||||
{t("details.item.button.share")}
|
||||
</TooltipContent>
|
||||
</TooltipPortal>
|
||||
</Tooltip>
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<DownloadVideoButton
|
||||
source={`${baseUrl}api/${review.camera}/start/${review.start_time}/end/${review.end_time || Date.now() / 1000}/clip.mp4`}
|
||||
camera={review.camera}
|
||||
startTime={review.start_time}
|
||||
/>
|
||||
</TooltipTrigger>
|
||||
<TooltipPortal>
|
||||
<TooltipContent>
|
||||
{t("button.download", { ns: "common" })}
|
||||
</TooltipContent>
|
||||
</TooltipPortal>
|
||||
</Tooltip>
|
||||
</div>
|
||||
</Header>
|
||||
)}
|
||||
{pane == "overview" && (
|
||||
<div className="flex flex-col gap-5 md:mt-3">
|
||||
{aiAnalysis != undefined && (
|
||||
<div
|
||||
className={cn(
|
||||
"flex h-full w-full flex-col gap-2 rounded-md bg-card p-2",
|
||||
isDesktop && "m-2 w-[90%]",
|
||||
)}
|
||||
>
|
||||
{t("aiAnalysis.title")}
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.description.label")}
|
||||
</div>
|
||||
<div className="text-sm">{aiAnalysis.scene}</div>
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.score.label")}
|
||||
</div>
|
||||
<div className="text-sm">{aiAnalysis.confidence * 100}%</div>
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("concerns.label")}
|
||||
</div>
|
||||
<div className="text-sm">{aiThreatLevel}</div>
|
||||
</div>
|
||||
)}
|
||||
<div className="flex w-full flex-row">
|
||||
<div className="flex w-full flex-col gap-3">
|
||||
<div className="flex flex-col gap-1.5">
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.camera")}
|
||||
</div>
|
||||
<div className="text-sm smart-capitalize">
|
||||
<CameraNameLabel camera={review.camera} />
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex flex-col gap-1.5">
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.timestamp")}
|
||||
</div>
|
||||
<div className="text-sm">{formattedDate}</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex w-full flex-col items-center gap-2">
|
||||
<div className="flex w-full flex-col gap-1.5 lg:pr-8">
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.objects")}
|
||||
</div>
|
||||
<div className="scrollbar-container flex max-h-32 flex-col items-start gap-2 overflow-y-auto text-sm smart-capitalize">
|
||||
{events?.map((event) => {
|
||||
return (
|
||||
<div
|
||||
key={event.id}
|
||||
className="flex flex-row items-center gap-2 smart-capitalize"
|
||||
>
|
||||
{getIconForLabel(
|
||||
event.label,
|
||||
"size-3 text-primary",
|
||||
)}
|
||||
{event.sub_label ??
|
||||
event.label.replaceAll("_", " ")}{" "}
|
||||
({Math.round(event.data.top_score * 100)}%)
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<div
|
||||
className="cursor-pointer"
|
||||
onClick={() => {
|
||||
navigate(`/explore?event_id=${event.id}`);
|
||||
}}
|
||||
>
|
||||
<LuSearch className="size-4 text-muted-foreground" />
|
||||
</div>
|
||||
</TooltipTrigger>
|
||||
<TooltipPortal>
|
||||
<TooltipContent>
|
||||
{t("details.item.button.viewInExplore")}
|
||||
</TooltipContent>
|
||||
</TooltipPortal>
|
||||
</Tooltip>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
{review.data.zones.length > 0 && (
|
||||
<div className="scrollbar-container flex max-h-32 w-full flex-col gap-1.5">
|
||||
<div className="text-sm text-primary/40">
|
||||
{t("details.zones")}
|
||||
</div>
|
||||
<div className="flex flex-col items-start gap-2 text-sm smart-capitalize">
|
||||
{review.data.zones.map((zone) => {
|
||||
return (
|
||||
<div
|
||||
key={zone}
|
||||
className="flex flex-row items-center gap-2 smart-capitalize"
|
||||
>
|
||||
{zone.replaceAll("_", " ")}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
{hasMismatch && (
|
||||
<div className="p-4 text-center text-sm">
|
||||
{(() => {
|
||||
const detectedCount = Math.abs(
|
||||
(events?.length ?? 0) -
|
||||
(review?.data.detections.length ?? 0),
|
||||
);
|
||||
|
||||
return t("details.item.tips.mismatch", {
|
||||
count: detectedCount,
|
||||
});
|
||||
})()}
|
||||
{missingObjects.length > 0 && (
|
||||
<div className="mt-2">
|
||||
<Trans
|
||||
ns="views/explore"
|
||||
values={{
|
||||
objects: missingObjects
|
||||
.map((x) => getTranslatedLabel(x))
|
||||
.join(", "),
|
||||
}}
|
||||
>
|
||||
details.item.tips.hasMissingObjects
|
||||
</Trans>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<div className="relative flex size-full flex-col gap-2">
|
||||
{events?.map((event) => (
|
||||
<EventItem
|
||||
key={event.id}
|
||||
event={event}
|
||||
setPane={setPane}
|
||||
setSelectedEvent={setSelectedEvent}
|
||||
setUpload={setUpload}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{pane == "details" && selectedEvent && (
|
||||
<div className="mt-0 flex size-full flex-col gap-2">
|
||||
<TrackingDetails event={selectedEvent} setPane={setPane} />
|
||||
</div>
|
||||
)}
|
||||
</Content>
|
||||
</Overlay>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
type EventItemProps = {
|
||||
event: Event;
|
||||
setPane: React.Dispatch<React.SetStateAction<ReviewDetailPaneType>>;
|
||||
setSelectedEvent: React.Dispatch<React.SetStateAction<Event | undefined>>;
|
||||
setUpload?: React.Dispatch<React.SetStateAction<Event | undefined>>;
|
||||
};
|
||||
|
||||
function EventItem({
|
||||
event,
|
||||
setPane,
|
||||
setSelectedEvent,
|
||||
setUpload,
|
||||
}: EventItemProps) {
|
||||
const { t } = useTranslation(["views/explore"]);
|
||||
|
||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
|
||||
const apiHost = useApiHost();
|
||||
|
||||
const imgRef = useRef(null);
|
||||
|
||||
const [hovered, setHovered] = useState(isMobile);
|
||||
|
||||
const navigate = useNavigate();
|
||||
|
||||
return (
|
||||
<>
|
||||
<div
|
||||
className={cn(
|
||||
"relative mr-auto",
|
||||
!event.has_snapshot && "flex flex-row items-center justify-center",
|
||||
)}
|
||||
onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
|
||||
onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
|
||||
key={event.id}
|
||||
>
|
||||
{event.has_snapshot && (
|
||||
<>
|
||||
<div className="pointer-events-none absolute inset-x-0 top-0 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div>
|
||||
<div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div>
|
||||
</>
|
||||
)}
|
||||
<img
|
||||
ref={imgRef}
|
||||
className={cn(
|
||||
"select-none rounded-lg object-contain transition-opacity",
|
||||
)}
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={
|
||||
event.has_snapshot
|
||||
? `${apiHost}api/events/${event.id}/snapshot.jpg`
|
||||
: `${apiHost}api/events/${event.id}/thumbnail.webp`
|
||||
}
|
||||
/>
|
||||
{hovered && (
|
||||
<div>
|
||||
<div
|
||||
className={cn("absolute right-1 top-1 flex items-center gap-2")}
|
||||
>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<a
|
||||
download
|
||||
href={
|
||||
event.has_snapshot
|
||||
? `${apiHost}api/events/${event.id}/snapshot.jpg`
|
||||
: `${apiHost}api/events/${event.id}/thumbnail.webp`
|
||||
}
|
||||
>
|
||||
<Chip className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500">
|
||||
<FaDownload className="size-4 text-white" />
|
||||
</Chip>
|
||||
</a>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
{t("button.download", { ns: "common" })}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
|
||||
{event.has_snapshot &&
|
||||
event.plus_id == undefined &&
|
||||
event.data.type == "object" &&
|
||||
config?.plus.enabled && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<Chip
|
||||
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
|
||||
onClick={() => {
|
||||
setUpload?.(event);
|
||||
}}
|
||||
>
|
||||
<FrigatePlusIcon className="size-4 text-white" />
|
||||
</Chip>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
{t("itemMenu.submitToPlus.label")}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{event.has_clip && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<Chip
|
||||
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
|
||||
onClick={() => {
|
||||
setPane("details");
|
||||
setSelectedEvent(event);
|
||||
}}
|
||||
>
|
||||
<FaArrowsRotate className="size-4 text-white" />
|
||||
</Chip>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
{t("itemMenu.viewTrackingDetails.label")}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
|
||||
{event.has_snapshot && config?.semantic_search.enabled && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<Chip
|
||||
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
|
||||
onClick={() => {
|
||||
navigate(
|
||||
`/explore?search_type=similarity&event_id=${event.id}`,
|
||||
);
|
||||
}}
|
||||
>
|
||||
<FaImages className="size-4 text-white" />
|
||||
</Chip>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
{t("itemMenu.findSimilar.label")}
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@ -31,10 +31,9 @@ import {
|
||||
FaDownload,
|
||||
FaHistory,
|
||||
FaImage,
|
||||
FaRegListAlt,
|
||||
FaVideo,
|
||||
} from "react-icons/fa";
|
||||
import TrackingDetails from "./TrackingDetails";
|
||||
import { TrackingDetails } from "./TrackingDetails";
|
||||
import { DetailStreamProvider } from "@/context/detail-stream-context";
|
||||
import {
|
||||
MobilePage,
|
||||
MobilePageContent,
|
||||
@ -80,13 +79,9 @@ import { getTranslatedLabel } from "@/utils/i18n";
|
||||
import { CgTranscript } from "react-icons/cg";
|
||||
import { CameraNameLabel } from "@/components/camera/CameraNameLabel";
|
||||
import { PiPath } from "react-icons/pi";
|
||||
import Heading from "@/components/ui/heading";
|
||||
|
||||
const SEARCH_TABS = [
|
||||
"details",
|
||||
"snapshot",
|
||||
"video",
|
||||
"tracking_details",
|
||||
] as const;
|
||||
const SEARCH_TABS = ["snapshot", "tracking_details"] as const;
|
||||
export type SearchTab = (typeof SEARCH_TABS)[number];
|
||||
|
||||
type SearchDetailDialogProps = {
|
||||
@ -109,6 +104,7 @@ export default function SearchDetailDialog({
|
||||
const { data: config } = useSWR<FrigateConfig>("config", {
|
||||
revalidateOnFocus: false,
|
||||
});
|
||||
const apiHost = useApiHost();
|
||||
|
||||
// tabs
|
||||
|
||||
@ -149,16 +145,6 @@ export default function SearchDetailDialog({
|
||||
|
||||
const views = [...SEARCH_TABS];
|
||||
|
||||
if (!search.has_snapshot) {
|
||||
const index = views.indexOf("snapshot");
|
||||
views.splice(index, 1);
|
||||
}
|
||||
|
||||
if (!search.has_clip) {
|
||||
const index = views.indexOf("video");
|
||||
views.splice(index, 1);
|
||||
}
|
||||
|
||||
if (search.data.type != "object" || !search.has_clip) {
|
||||
const index = views.indexOf("tracking_details");
|
||||
views.splice(index, 1);
|
||||
@ -173,10 +159,50 @@ export default function SearchDetailDialog({
|
||||
}
|
||||
|
||||
if (!searchTabs.includes(pageToggle)) {
|
||||
setSearchPage("details");
|
||||
setSearchPage("snapshot");
|
||||
}
|
||||
}, [pageToggle, searchTabs, setSearchPage]);
|
||||
|
||||
// Tabs component for reuse
|
||||
const tabsComponent = (
|
||||
<ScrollArea className="w-full whitespace-nowrap">
|
||||
<div className="flex flex-row">
|
||||
<ToggleGroup
|
||||
className="*:rounded-md *:px-3 *:py-4"
|
||||
type="single"
|
||||
size="sm"
|
||||
value={pageToggle}
|
||||
onValueChange={(value: SearchTab) => {
|
||||
if (value) {
|
||||
setPageToggle(value);
|
||||
}
|
||||
}}
|
||||
>
|
||||
{Object.values(searchTabs).map((item) => (
|
||||
<ToggleGroupItem
|
||||
key={item}
|
||||
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
|
||||
value={item}
|
||||
data-nav-item={item}
|
||||
aria-label={`Select ${item}`}
|
||||
>
|
||||
{item == "snapshot" && <FaImage className="size-4" />}
|
||||
{item == "tracking_details" && <PiPath className="size-4" />}
|
||||
<div className="smart-capitalize">
|
||||
{item === "snapshot"
|
||||
? search?.has_snapshot
|
||||
? t("type.snapshot")
|
||||
: t("type.thumbnail")
|
||||
: t(`type.${item}`)}
|
||||
</div>
|
||||
</ToggleGroupItem>
|
||||
))}
|
||||
</ToggleGroup>
|
||||
<ScrollBar orientation="horizontal" className="h-0" />
|
||||
</div>
|
||||
</ScrollArea>
|
||||
);
|
||||
|
||||
if (!search) {
|
||||
return;
|
||||
}
|
||||
@ -190,92 +216,188 @@ export default function SearchDetailDialog({
|
||||
const Description = isDesktop ? DialogDescription : MobilePageDescription;
|
||||
|
||||
return (
|
||||
<Overlay
|
||||
open={isOpen}
|
||||
onOpenChange={handleOpenChange}
|
||||
enableHistoryBack={true}
|
||||
<DetailStreamProvider
|
||||
isDetailMode={true}
|
||||
currentTime={(search as unknown as Event)?.start_time ?? 0}
|
||||
camera={(search as unknown as Event)?.camera ?? ""}
|
||||
initialSelectedObjectIds={[(search as unknown as Event).id as string]}
|
||||
>
|
||||
<Content
|
||||
className={cn(
|
||||
"scrollbar-container overflow-y-auto",
|
||||
isDesktop &&
|
||||
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
|
||||
isMobile && "px-4",
|
||||
)}
|
||||
<Overlay
|
||||
open={isOpen}
|
||||
onOpenChange={handleOpenChange}
|
||||
enableHistoryBack={true}
|
||||
>
|
||||
<Header>
|
||||
<Title>{t("trackedObjectDetails")}</Title>
|
||||
<Description className="sr-only">
|
||||
{t("trackedObjectDetails")}
|
||||
</Description>
|
||||
</Header>
|
||||
<ScrollArea
|
||||
className={cn("w-full whitespace-nowrap", isMobile && "my-2")}
|
||||
<Content
|
||||
className={cn(
|
||||
"scrollbar-container overflow-y-auto",
|
||||
isDesktop &&
|
||||
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
|
||||
isDesktop &&
|
||||
page == "tracking_details" &&
|
||||
"lg:max-w-[75%] xl:max-w-[80%]",
|
||||
isMobile && "px-4",
|
||||
)}
|
||||
>
|
||||
<div className="flex flex-row">
|
||||
<ToggleGroup
|
||||
className="*:rounded-md *:px-3 *:py-4"
|
||||
type="single"
|
||||
size="sm"
|
||||
value={pageToggle}
|
||||
onValueChange={(value: SearchTab) => {
|
||||
if (value) {
|
||||
setPageToggle(value);
|
||||
}
|
||||
}}
|
||||
>
|
||||
{Object.values(searchTabs).map((item) => (
|
||||
<ToggleGroupItem
|
||||
key={item}
|
||||
className={`flex scroll-mx-10 items-center justify-between gap-2 ${page == "details" ? "last:mr-20" : ""} ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
|
||||
value={item}
|
||||
data-nav-item={item}
|
||||
aria-label={`Select ${item}`}
|
||||
<Header>
|
||||
<Title>{t("trackedObjectDetails")}</Title>
|
||||
<Description className="sr-only">
|
||||
{t("trackedObjectDetails")}
|
||||
</Description>
|
||||
</Header>
|
||||
{isDesktop ? (
|
||||
page === "tracking_details" ? (
|
||||
<TrackingDetails
|
||||
className="size-full"
|
||||
event={search as unknown as Event}
|
||||
tabs={tabsComponent}
|
||||
/>
|
||||
) : (
|
||||
<div className="flex h-full gap-4 overflow-hidden">
|
||||
<div
|
||||
className={cn(
|
||||
"scrollbar-container flex-[3] overflow-y-hidden",
|
||||
page === "snapshot" && !search.has_snapshot && "flex-[2]",
|
||||
)}
|
||||
>
|
||||
{item == "details" && <FaRegListAlt className="size-4" />}
|
||||
{item == "snapshot" && <FaImage className="size-4" />}
|
||||
{item == "video" && <FaVideo className="size-4" />}
|
||||
{item == "tracking_details" && <PiPath className="size-4" />}
|
||||
<div className="smart-capitalize">{t(`type.${item}`)}</div>
|
||||
</ToggleGroupItem>
|
||||
))}
|
||||
</ToggleGroup>
|
||||
<ScrollBar orientation="horizontal" className="h-0" />
|
||||
</div>
|
||||
</ScrollArea>
|
||||
{page == "details" && (
|
||||
<ObjectDetailsTab
|
||||
search={search}
|
||||
config={config}
|
||||
setSearch={setSearch}
|
||||
setSimilarity={setSimilarity}
|
||||
setInputFocused={setInputFocused}
|
||||
/>
|
||||
)}
|
||||
{page == "snapshot" && (
|
||||
<ObjectSnapshotTab
|
||||
search={
|
||||
{
|
||||
...search,
|
||||
plus_id: config?.plus?.enabled ? search.plus_id : "not_enabled",
|
||||
} as unknown as Event
|
||||
}
|
||||
onEventUploaded={() => {
|
||||
search.plus_id = "new_upload";
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{page == "video" && <VideoTab search={search} />}
|
||||
{page == "tracking_details" && (
|
||||
<TrackingDetails
|
||||
className="w-full overflow-x-hidden"
|
||||
event={search as unknown as Event}
|
||||
fullscreen={true}
|
||||
setPane={() => {}}
|
||||
/>
|
||||
)}
|
||||
</Content>
|
||||
</Overlay>
|
||||
{page === "snapshot" && search.has_snapshot && (
|
||||
<ObjectSnapshotTab
|
||||
search={
|
||||
{
|
||||
...search,
|
||||
plus_id: config?.plus?.enabled
|
||||
? search.plus_id
|
||||
: "not_enabled",
|
||||
} as unknown as Event
|
||||
}
|
||||
onEventUploaded={() => {
|
||||
search.plus_id = "new_upload";
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{page === "snapshot" && !search.has_snapshot && (
|
||||
<img
|
||||
className="size-full select-none rounded-lg object-contain transition-opacity"
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
<div className="flex flex-[2] flex-col gap-4 overflow-hidden">
|
||||
{tabsComponent}
|
||||
<div className="scrollbar-container flex-1 overflow-y-auto">
|
||||
{page == "snapshot" && (
|
||||
<ObjectDetailsTab
|
||||
search={search}
|
||||
config={config}
|
||||
setSearch={setSearch}
|
||||
setSimilarity={setSimilarity}
|
||||
setInputFocused={setInputFocused}
|
||||
showThumbnail={false}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
) : (
|
||||
<>
|
||||
<ScrollArea
|
||||
className={cn("w-full whitespace-nowrap", isMobile && "my-2")}
|
||||
>
|
||||
<div className="flex flex-row">
|
||||
<ToggleGroup
|
||||
className="*:rounded-md *:px-3 *:py-4"
|
||||
type="single"
|
||||
size="sm"
|
||||
value={pageToggle}
|
||||
onValueChange={(value: SearchTab) => {
|
||||
if (value) {
|
||||
setPageToggle(value);
|
||||
}
|
||||
}}
|
||||
>
|
||||
{Object.values(searchTabs).map((item) => (
|
||||
<ToggleGroupItem
|
||||
key={item}
|
||||
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
|
||||
value={item}
|
||||
data-nav-item={item}
|
||||
aria-label={`Select ${item}`}
|
||||
>
|
||||
{item == "snapshot" && <FaImage className="size-4" />}
|
||||
{item == "tracking_details" && (
|
||||
<PiPath className="size-4" />
|
||||
)}
|
||||
<div className="smart-capitalize">
|
||||
{t(`type.${item}`)}
|
||||
</div>
|
||||
</ToggleGroupItem>
|
||||
))}
|
||||
</ToggleGroup>
|
||||
<ScrollBar orientation="horizontal" className="h-0" />
|
||||
</div>
|
||||
</ScrollArea>
|
||||
{page == "snapshot" && (
|
||||
<>
|
||||
{search.has_snapshot && (
|
||||
<ObjectSnapshotTab
|
||||
search={
|
||||
{
|
||||
...search,
|
||||
plus_id: config?.plus?.enabled
|
||||
? search.plus_id
|
||||
: "not_enabled",
|
||||
} as unknown as Event
|
||||
}
|
||||
onEventUploaded={() => {
|
||||
search.plus_id = "new_upload";
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{page == "snapshot" && !search.has_snapshot && (
|
||||
<img
|
||||
className="w-full select-none rounded-lg object-contain transition-opacity"
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
|
||||
/>
|
||||
)}
|
||||
<Heading as="h3" className="mt-2 smart-capitalize">
|
||||
{t("type.details")}
|
||||
</Heading>
|
||||
<ObjectDetailsTab
|
||||
search={search}
|
||||
config={config}
|
||||
setSearch={setSearch}
|
||||
setSimilarity={setSimilarity}
|
||||
setInputFocused={setInputFocused}
|
||||
showThumbnail={false}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
{page == "tracking_details" && (
|
||||
<TrackingDetails event={search as unknown as Event} />
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</Content>
|
||||
</Overlay>
|
||||
</DetailStreamProvider>
|
||||
);
|
||||
}
|
||||
|
||||
@ -285,6 +407,7 @@ type ObjectDetailsTabProps = {
|
||||
setSearch: (search: SearchResult | undefined) => void;
|
||||
setSimilarity?: () => void;
|
||||
setInputFocused: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
showThumbnail?: boolean;
|
||||
};
|
||||
function ObjectDetailsTab({
|
||||
search,
|
||||
@ -292,6 +415,7 @@ function ObjectDetailsTab({
|
||||
setSearch,
|
||||
setSimilarity,
|
||||
setInputFocused,
|
||||
showThumbnail = true,
|
||||
}: ObjectDetailsTabProps) {
|
||||
const { t } = useTranslation(["views/explore", "views/faceLibrary"]);
|
||||
|
||||
@ -873,66 +997,71 @@ function ObjectDetailsTab({
|
||||
<div className="text-sm">{formattedDate}</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex w-full flex-col gap-2 pl-6">
|
||||
<img
|
||||
className="aspect-video select-none rounded-lg object-contain transition-opacity"
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
|
||||
/>
|
||||
<div
|
||||
className={cn("flex w-full flex-row gap-2", isMobile && "flex-col")}
|
||||
>
|
||||
{config?.semantic_search.enabled &&
|
||||
setSimilarity != undefined &&
|
||||
search.data.type == "object" && (
|
||||
<Button
|
||||
{showThumbnail && (
|
||||
<div className="flex w-full flex-col gap-2 pl-6">
|
||||
<img
|
||||
className="aspect-video select-none rounded-lg object-contain transition-opacity"
|
||||
style={
|
||||
isIOS
|
||||
? {
|
||||
WebkitUserSelect: "none",
|
||||
WebkitTouchCallout: "none",
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
draggable={false}
|
||||
src={`${apiHost}api/events/${search.id}/thumbnail.webp`}
|
||||
/>
|
||||
<div
|
||||
className={cn(
|
||||
"flex w-full flex-row gap-2",
|
||||
isMobile && "flex-col",
|
||||
)}
|
||||
>
|
||||
{config?.semantic_search.enabled &&
|
||||
setSimilarity != undefined &&
|
||||
search.data.type == "object" && (
|
||||
<Button
|
||||
className="w-full"
|
||||
aria-label={t("itemMenu.findSimilar.aria")}
|
||||
onClick={() => {
|
||||
setSearch(undefined);
|
||||
setSimilarity();
|
||||
}}
|
||||
>
|
||||
<div className="flex gap-1">
|
||||
<LuSearch />
|
||||
{t("itemMenu.findSimilar.label")}
|
||||
</div>
|
||||
</Button>
|
||||
)}
|
||||
{hasFace && (
|
||||
<FaceSelectionDialog
|
||||
className="w-full"
|
||||
aria-label={t("itemMenu.findSimilar.aria")}
|
||||
onClick={() => {
|
||||
setSearch(undefined);
|
||||
setSimilarity();
|
||||
}}
|
||||
faceNames={faceNames}
|
||||
onTrainAttempt={onTrainFace}
|
||||
>
|
||||
<div className="flex gap-1">
|
||||
<LuSearch />
|
||||
{t("itemMenu.findSimilar.label")}
|
||||
</div>
|
||||
</Button>
|
||||
)}
|
||||
{hasFace && (
|
||||
<FaceSelectionDialog
|
||||
className="w-full"
|
||||
faceNames={faceNames}
|
||||
onTrainAttempt={onTrainFace}
|
||||
>
|
||||
<Button className="w-full">
|
||||
<div className="flex gap-1">
|
||||
<TbFaceId />
|
||||
{t("trainFace", { ns: "views/faceLibrary" })}
|
||||
</div>
|
||||
</Button>
|
||||
</FaceSelectionDialog>
|
||||
)}
|
||||
{config?.cameras[search?.camera].audio_transcription.enabled &&
|
||||
search?.label == "speech" &&
|
||||
search?.end_time && (
|
||||
<Button className="w-full" onClick={onTranscribe}>
|
||||
<div className="flex gap-1">
|
||||
<CgTranscript />
|
||||
{t("itemMenu.audioTranscription.label")}
|
||||
</div>
|
||||
</Button>
|
||||
<Button className="w-full">
|
||||
<div className="flex gap-1">
|
||||
<TbFaceId />
|
||||
{t("trainFace", { ns: "views/faceLibrary" })}
|
||||
</div>
|
||||
</Button>
|
||||
</FaceSelectionDialog>
|
||||
)}
|
||||
{config?.cameras[search?.camera].audio_transcription.enabled &&
|
||||
search?.label == "speech" &&
|
||||
search?.end_time && (
|
||||
<Button className="w-full" onClick={onTranscribe}>
|
||||
<div className="flex gap-1">
|
||||
<CgTranscript />
|
||||
{t("itemMenu.audioTranscription.label")}
|
||||
</div>
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
<div className="flex flex-col gap-1.5">
|
||||
{config?.cameras[search.camera].objects.genai.enabled &&
|
||||
@ -1167,7 +1296,7 @@ export function ObjectSnapshotTab({
|
||||
search.label != "on_demand" && (
|
||||
<Card className="p-1 text-sm md:p-2">
|
||||
<CardContent className="flex flex-col items-center justify-between gap-3 p-2 md:flex-row">
|
||||
<div className={cn("flex flex-col space-y-3")}>
|
||||
<div className={cn("flex max-w-sm flex-col space-y-3")}>
|
||||
<div className={"text-lg leading-none"}>
|
||||
{t("explore.plus.submitToPlus.label")}
|
||||
</div>
|
||||
@ -1176,7 +1305,7 @@ export function ObjectSnapshotTab({
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex w-full flex-1 flex-col justify-center gap-2 md:ml-8 md:w-auto md:justify-end">
|
||||
<div className="flex w-full flex-1 flex-col justify-center gap-2 md:ml-8 md:flex-1 md:justify-end">
|
||||
{state == "reviewing" && (
|
||||
<>
|
||||
<div>
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -174,9 +174,7 @@ export default function CameraWizardDialog({
|
||||
...(friendlyName && { friendly_name: friendlyName }),
|
||||
ffmpeg: {
|
||||
inputs: wizardData.streams.map((stream, index) => {
|
||||
const isRestreamed =
|
||||
wizardData.restreamIds?.includes(stream.id) ?? false;
|
||||
if (isRestreamed) {
|
||||
if (stream.restream) {
|
||||
const go2rtcStreamName =
|
||||
wizardData.streams!.length === 1
|
||||
? finalCameraName
|
||||
@ -234,7 +232,11 @@ export default function CameraWizardDialog({
|
||||
wizardData.streams!.length === 1
|
||||
? finalCameraName
|
||||
: `${finalCameraName}_${index + 1}`;
|
||||
go2rtcStreams[streamName] = [stream.url];
|
||||
|
||||
const streamUrl = stream.useFfmpeg
|
||||
? `ffmpeg:${stream.url}`
|
||||
: stream.url;
|
||||
go2rtcStreams[streamName] = [streamUrl];
|
||||
});
|
||||
|
||||
if (Object.keys(go2rtcStreams).length > 0) {
|
||||
|
||||
@ -608,6 +608,12 @@ export default function Step1NameCamera({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{isTesting && (
|
||||
<div className="flex items-center gap-2 text-sm text-muted-foreground">
|
||||
<ActivityIndicator className="size-4" />
|
||||
{testStatus}
|
||||
</div>
|
||||
)}
|
||||
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
|
||||
<Button
|
||||
type="button"
|
||||
@ -635,10 +641,7 @@ export default function Step1NameCamera({
|
||||
variant="select"
|
||||
className="flex items-center justify-center gap-2 sm:flex-1"
|
||||
>
|
||||
{isTesting && <ActivityIndicator className="size-4" />}
|
||||
{isTesting && testStatus
|
||||
? testStatus
|
||||
: t("cameraWizard.step1.testConnection")}
|
||||
{t("cameraWizard.step1.testConnection")}
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@ -201,16 +201,12 @@ export default function Step2StreamConfig({
|
||||
|
||||
const setRestream = useCallback(
|
||||
(streamId: string) => {
|
||||
const currentIds = wizardData.restreamIds || [];
|
||||
const isSelected = currentIds.includes(streamId);
|
||||
const newIds = isSelected
|
||||
? currentIds.filter((id) => id !== streamId)
|
||||
: [...currentIds, streamId];
|
||||
onUpdate({
|
||||
restreamIds: newIds,
|
||||
});
|
||||
const stream = streams.find((s) => s.id === streamId);
|
||||
if (!stream) return;
|
||||
|
||||
updateStream(streamId, { restream: !stream.restream });
|
||||
},
|
||||
[wizardData.restreamIds, onUpdate],
|
||||
[streams, updateStream],
|
||||
);
|
||||
|
||||
const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
|
||||
@ -435,9 +431,7 @@ export default function Step2StreamConfig({
|
||||
{t("cameraWizard.step2.go2rtc")}
|
||||
</span>
|
||||
<Switch
|
||||
checked={(wizardData.restreamIds || []).includes(
|
||||
stream.id,
|
||||
)}
|
||||
checked={stream.restream || false}
|
||||
onCheckedChange={() => setRestream(stream.id)}
|
||||
/>
|
||||
</div>
|
||||
|
||||
@ -1,7 +1,13 @@
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import { Switch } from "@/components/ui/switch";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/ui/popover";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { LuRotateCcw } from "react-icons/lu";
|
||||
import { LuRotateCcw, LuInfo } from "react-icons/lu";
|
||||
import { useState, useCallback, useMemo, useEffect } from "react";
|
||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||
import axios from "axios";
|
||||
@ -216,7 +222,6 @@ export default function Step3Validation({
|
||||
brandTemplate: wizardData.brandTemplate,
|
||||
customUrl: wizardData.customUrl,
|
||||
streams: wizardData.streams,
|
||||
restreamIds: wizardData.restreamIds,
|
||||
};
|
||||
|
||||
onSave(configData);
|
||||
@ -322,6 +327,51 @@ export default function Step3Validation({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{result?.success && (
|
||||
<div className="mb-3 flex items-center justify-between">
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-sm">
|
||||
{t("cameraWizard.step3.ffmpegModule")}
|
||||
</span>
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="h-4 w-4 p-0"
|
||||
>
|
||||
<LuInfo className="size-3" />
|
||||
</Button>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent className="pointer-events-auto w-80 text-xs">
|
||||
<div className="space-y-2">
|
||||
<div className="font-medium">
|
||||
{t("cameraWizard.step3.ffmpegModule")}
|
||||
</div>
|
||||
<div className="text-muted-foreground">
|
||||
{t(
|
||||
"cameraWizard.step3.ffmpegModuleDescription",
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
</div>
|
||||
<Switch
|
||||
checked={stream.useFfmpeg || false}
|
||||
onCheckedChange={(checked) => {
|
||||
onUpdate({
|
||||
streams: streams.map((s) =>
|
||||
s.id === stream.id
|
||||
? { ...s, useFfmpeg: checked }
|
||||
: s,
|
||||
),
|
||||
});
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
|
||||
<span className="break-all text-sm text-muted-foreground">
|
||||
{stream.url}
|
||||
@ -491,8 +541,7 @@ function StreamIssues({
|
||||
|
||||
// Restreaming check
|
||||
if (stream.roles.includes("record")) {
|
||||
const restreamIds = wizardData.restreamIds || [];
|
||||
if (restreamIds.includes(stream.id)) {
|
||||
if (stream.restream) {
|
||||
result.push({
|
||||
type: "warning",
|
||||
message: t("cameraWizard.step3.issues.restreamingWarning"),
|
||||
@ -660,9 +709,10 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
|
||||
|
||||
useEffect(() => {
|
||||
// Register stream with go2rtc
|
||||
const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
|
||||
axios
|
||||
.put(`go2rtc/streams/${streamId}`, null, {
|
||||
params: { src: stream.url },
|
||||
params: { src: streamUrl },
|
||||
})
|
||||
.then(() => {
|
||||
// Add small delay to allow go2rtc api to run and initialize the stream
|
||||
@ -680,7 +730,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
|
||||
// do nothing on cleanup errors - go2rtc won't consume the streams
|
||||
});
|
||||
};
|
||||
}, [stream.url, streamId]);
|
||||
}, [stream.url, stream.useFfmpeg, streamId]);
|
||||
|
||||
const resolution = stream.testResult?.resolution;
|
||||
let aspectRatio = "16/9";
|
||||
|
||||
@ -57,7 +57,7 @@ export default function DetailStream({
|
||||
elementRef: scrollRef,
|
||||
});
|
||||
|
||||
const effectiveTime = currentTime + annotationOffset / 1000;
|
||||
const effectiveTime = currentTime - annotationOffset / 1000;
|
||||
const [upload, setUpload] = useState<Event | undefined>(undefined);
|
||||
const [controlsExpanded, setControlsExpanded] = useState(false);
|
||||
const [alwaysExpandActive, setAlwaysExpandActive] = usePersistence(
|
||||
@ -213,6 +213,7 @@ export default function DetailStream({
|
||||
config={config}
|
||||
onSeek={onSeekCheckPlaying}
|
||||
effectiveTime={effectiveTime}
|
||||
annotationOffset={annotationOffset}
|
||||
isActive={activeReviewId == id}
|
||||
onActivate={() => setActiveReviewId(id)}
|
||||
onOpenUpload={(e) => setUpload(e)}
|
||||
@ -278,6 +279,7 @@ type ReviewGroupProps = {
|
||||
onActivate?: () => void;
|
||||
onOpenUpload?: (e: Event) => void;
|
||||
effectiveTime?: number;
|
||||
annotationOffset: number;
|
||||
alwaysExpandActive?: boolean;
|
||||
};
|
||||
|
||||
@ -290,11 +292,14 @@ function ReviewGroup({
|
||||
onActivate,
|
||||
onOpenUpload,
|
||||
effectiveTime,
|
||||
annotationOffset,
|
||||
alwaysExpandActive = false,
|
||||
}: ReviewGroupProps) {
|
||||
const { t } = useTranslation("views/events");
|
||||
const [open, setOpen] = useState(false);
|
||||
const start = review.start_time ?? 0;
|
||||
// review.start_time is in detect time, convert to record for seeking
|
||||
const startRecord = start + annotationOffset / 1000;
|
||||
|
||||
// Auto-expand when this review becomes active and alwaysExpandActive is enabled
|
||||
useEffect(() => {
|
||||
@ -371,7 +376,7 @@ function ReviewGroup({
|
||||
)}
|
||||
onClick={() => {
|
||||
onActivate?.();
|
||||
onSeek(start);
|
||||
onSeek(startRecord);
|
||||
}}
|
||||
>
|
||||
<div className="ml-4 mr-2 mt-1.5 flex flex-row items-start">
|
||||
@ -450,6 +455,7 @@ function ReviewGroup({
|
||||
key={event.id}
|
||||
event={event}
|
||||
effectiveTime={effectiveTime}
|
||||
annotationOffset={annotationOffset}
|
||||
onSeek={onSeek}
|
||||
onOpenUpload={onOpenUpload}
|
||||
/>
|
||||
@ -483,12 +489,14 @@ function ReviewGroup({
|
||||
type EventListProps = {
|
||||
event: Event;
|
||||
effectiveTime?: number;
|
||||
annotationOffset: number;
|
||||
onSeek: (ts: number, play?: boolean) => void;
|
||||
onOpenUpload?: (e: Event) => void;
|
||||
};
|
||||
function EventList({
|
||||
event,
|
||||
effectiveTime,
|
||||
annotationOffset,
|
||||
onSeek,
|
||||
onOpenUpload,
|
||||
}: EventListProps) {
|
||||
@ -505,14 +513,17 @@ function EventList({
|
||||
if (event) {
|
||||
setSelectedObjectIds([]);
|
||||
setSelectedObjectIds([event.id]);
|
||||
onSeek(event.start_time);
|
||||
// event.start_time is detect time, convert to record
|
||||
const recordTime = event.start_time + annotationOffset / 1000;
|
||||
onSeek(recordTime);
|
||||
} else {
|
||||
setSelectedObjectIds([]);
|
||||
}
|
||||
};
|
||||
|
||||
const handleTimelineClick = (ts: number, play?: boolean) => {
|
||||
handleObjectSelect(event);
|
||||
setSelectedObjectIds([]);
|
||||
setSelectedObjectIds([event.id]);
|
||||
onSeek(ts, play);
|
||||
};
|
||||
|
||||
@ -554,7 +565,6 @@ function EventList({
|
||||
)}
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
onSeek(event.start_time);
|
||||
handleObjectSelect(event);
|
||||
}}
|
||||
role="button"
|
||||
@ -568,7 +578,6 @@ function EventList({
|
||||
className="flex flex-1 items-center gap-2"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
onSeek(event.start_time);
|
||||
handleObjectSelect(event);
|
||||
}}
|
||||
role="button"
|
||||
@ -607,6 +616,7 @@ function EventList({
|
||||
eventId={event.id}
|
||||
onSeek={handleTimelineClick}
|
||||
effectiveTime={effectiveTime}
|
||||
annotationOffset={annotationOffset}
|
||||
startTime={event.start_time}
|
||||
endTime={event.end_time}
|
||||
/>
|
||||
@ -621,6 +631,7 @@ type LifecycleItemProps = {
|
||||
isActive?: boolean;
|
||||
onSeek?: (timestamp: number, play?: boolean) => void;
|
||||
effectiveTime?: number;
|
||||
annotationOffset: number;
|
||||
isTimelineActive?: boolean;
|
||||
};
|
||||
|
||||
@ -629,6 +640,7 @@ function LifecycleItem({
|
||||
isActive,
|
||||
onSeek,
|
||||
effectiveTime,
|
||||
annotationOffset,
|
||||
isTimelineActive = false,
|
||||
}: LifecycleItemProps) {
|
||||
const { t } = useTranslation("views/events");
|
||||
@ -682,7 +694,8 @@ function LifecycleItem({
|
||||
<div
|
||||
role="button"
|
||||
onClick={() => {
|
||||
onSeek?.(item.timestamp, false);
|
||||
const recordTimestamp = item.timestamp + annotationOffset / 1000;
|
||||
onSeek?.(recordTimestamp, false);
|
||||
}}
|
||||
className={cn(
|
||||
"flex cursor-pointer items-center gap-2 text-sm text-primary-variant",
|
||||
@ -751,12 +764,14 @@ function ObjectTimeline({
|
||||
eventId,
|
||||
onSeek,
|
||||
effectiveTime,
|
||||
annotationOffset,
|
||||
startTime,
|
||||
endTime,
|
||||
}: {
|
||||
eventId: string;
|
||||
onSeek: (ts: number, play?: boolean) => void;
|
||||
effectiveTime?: number;
|
||||
annotationOffset: number;
|
||||
startTime?: number;
|
||||
endTime?: number;
|
||||
}) {
|
||||
@ -857,6 +872,7 @@ function ObjectTimeline({
|
||||
onSeek={onSeek}
|
||||
isActive={isActive}
|
||||
effectiveTime={effectiveTime}
|
||||
annotationOffset={annotationOffset}
|
||||
isTimelineActive={isWithinEventRange}
|
||||
/>
|
||||
);
|
||||
|
||||
@ -22,6 +22,7 @@ interface DetailStreamProviderProps {
|
||||
isDetailMode: boolean;
|
||||
currentTime: number;
|
||||
camera: string;
|
||||
initialSelectedObjectIds?: string[];
|
||||
}
|
||||
|
||||
export function DetailStreamProvider({
|
||||
@ -29,8 +30,11 @@ export function DetailStreamProvider({
|
||||
isDetailMode,
|
||||
currentTime,
|
||||
camera,
|
||||
initialSelectedObjectIds,
|
||||
}: DetailStreamProviderProps) {
|
||||
const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>([]);
|
||||
const [selectedObjectIds, setSelectedObjectIds] = useState<string[]>(
|
||||
() => initialSelectedObjectIds ?? [],
|
||||
);
|
||||
|
||||
const toggleObjectSelection = (id: string | undefined) => {
|
||||
if (id === undefined) {
|
||||
|
||||
@ -85,6 +85,8 @@ export type StreamConfig = {
|
||||
quality?: string;
|
||||
testResult?: TestResult;
|
||||
userTested?: boolean;
|
||||
useFfmpeg?: boolean;
|
||||
restream?: boolean;
|
||||
};
|
||||
|
||||
export type TestResult = {
|
||||
@ -105,7 +107,6 @@ export type WizardFormData = {
|
||||
brandTemplate?: CameraBrand;
|
||||
customUrl?: string;
|
||||
streams?: StreamConfig[];
|
||||
restreamIds?: string[];
|
||||
};
|
||||
|
||||
// API Response Types
|
||||
@ -146,6 +147,7 @@ export type CameraConfigData = {
|
||||
inputs: {
|
||||
path: string;
|
||||
roles: string[];
|
||||
input_args?: string;
|
||||
}[];
|
||||
};
|
||||
live?: {
|
||||
|
||||
@ -13,7 +13,8 @@ function formatZonesList(zones: string[]): string {
|
||||
});
|
||||
}
|
||||
|
||||
const allButLast = zones.slice(0, -1).join(", ");
|
||||
const separatorWithSpace = t("list.separatorWithSpace", { ns: "common" });
|
||||
const allButLast = zones.slice(0, -1).join(separatorWithSpace);
|
||||
return t("list.many", {
|
||||
items: allButLast,
|
||||
last: zones[zones.length - 1],
|
||||
|
||||
@ -2,7 +2,7 @@ import { baseUrl } from "@/api/baseUrl";
|
||||
import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog";
|
||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { Button, buttonVariants } from "@/components/ui/button";
|
||||
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
|
||||
import useOptimisticState from "@/hooks/use-optimistic-state";
|
||||
import { cn } from "@/lib/utils";
|
||||
@ -10,13 +10,34 @@ import {
|
||||
CustomClassificationModelConfig,
|
||||
FrigateConfig,
|
||||
} from "@/types/frigateConfig";
|
||||
import { useEffect, useMemo, useState } from "react";
|
||||
import { useCallback, useEffect, useMemo, useState } from "react";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { FaFolderPlus } from "react-icons/fa";
|
||||
import { MdModelTraining } from "react-icons/md";
|
||||
import { LuTrash2 } from "react-icons/lu";
|
||||
import { FiMoreVertical } from "react-icons/fi";
|
||||
import useSWR from "swr";
|
||||
import Heading from "@/components/ui/heading";
|
||||
import { useOverlayState } from "@/hooks/use-overlay-state";
|
||||
import axios from "axios";
|
||||
import { toast } from "sonner";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/ui/dropdown-menu";
|
||||
import {
|
||||
AlertDialog,
|
||||
AlertDialogAction,
|
||||
AlertDialogCancel,
|
||||
AlertDialogContent,
|
||||
AlertDialogDescription,
|
||||
AlertDialogFooter,
|
||||
AlertDialogHeader,
|
||||
AlertDialogTitle,
|
||||
} from "@/components/ui/alert-dialog";
|
||||
import BlurredIconButton from "@/components/button/BlurredIconButton";
|
||||
|
||||
const allModelTypes = ["objects", "states"] as const;
|
||||
type ModelType = (typeof allModelTypes)[number];
|
||||
@ -126,7 +147,7 @@ export default function ModelSelectionView({
|
||||
onClick={() => setNewModel(true)}
|
||||
>
|
||||
<FaFolderPlus />
|
||||
Add Classification
|
||||
{t("button.addClassification")}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
@ -142,6 +163,7 @@ export default function ModelSelectionView({
|
||||
key={config.name}
|
||||
config={config}
|
||||
onClick={() => onClick(config)}
|
||||
onDelete={() => refreshConfig()}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
@ -179,12 +201,55 @@ function NoModelsView({
|
||||
type ModelCardProps = {
|
||||
config: CustomClassificationModelConfig;
|
||||
onClick: () => void;
|
||||
onDelete: () => void;
|
||||
};
|
||||
function ModelCard({ config, onClick }: ModelCardProps) {
|
||||
function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
|
||||
const { t } = useTranslation(["views/classificationModel"]);
|
||||
|
||||
const { data: dataset } = useSWR<{
|
||||
[id: string]: string[];
|
||||
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
|
||||
|
||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
|
||||
|
||||
const handleDelete = useCallback(async () => {
|
||||
try {
|
||||
await axios.delete(`classification/${config.name}`);
|
||||
await axios.put("/config/set", {
|
||||
requires_restart: 0,
|
||||
update_topic: `config/classification/custom/${config.name}`,
|
||||
config_data: {
|
||||
classification: {
|
||||
custom: {
|
||||
[config.name]: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
toast.success(t("toast.success.deletedModel", { count: 1 }), {
|
||||
position: "top-center",
|
||||
});
|
||||
onDelete();
|
||||
} catch (err) {
|
||||
const error = err as {
|
||||
response?: { data?: { message?: string; detail?: string } };
|
||||
};
|
||||
const errorMessage =
|
||||
error.response?.data?.message ||
|
||||
error.response?.data?.detail ||
|
||||
"Unknown error";
|
||||
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
|
||||
position: "top-center",
|
||||
});
|
||||
}
|
||||
}, [config, onDelete, t]);
|
||||
|
||||
const handleDeleteClick = useCallback((e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
setDeleteDialogOpen(true);
|
||||
}, []);
|
||||
|
||||
const coverImage = useMemo(() => {
|
||||
if (!dataset) {
|
||||
return undefined;
|
||||
@ -204,22 +269,65 @@ function ModelCard({ config, onClick }: ModelCardProps) {
|
||||
}, [dataset]);
|
||||
|
||||
return (
|
||||
<div
|
||||
key={config.name}
|
||||
className={cn(
|
||||
"relative aspect-square w-full cursor-pointer overflow-hidden rounded-lg",
|
||||
"outline-transparent duration-500",
|
||||
)}
|
||||
onClick={() => onClick()}
|
||||
>
|
||||
<img
|
||||
className="size-full"
|
||||
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
|
||||
/>
|
||||
<ImageShadowOverlay />
|
||||
<div className="absolute bottom-2 left-3 text-lg smart-capitalize">
|
||||
{config.name}
|
||||
<>
|
||||
<AlertDialog
|
||||
open={deleteDialogOpen}
|
||||
onOpenChange={() => setDeleteDialogOpen(!deleteDialogOpen)}
|
||||
>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader>
|
||||
<AlertDialogTitle>{t("deleteModel.title")}</AlertDialogTitle>
|
||||
</AlertDialogHeader>
|
||||
<AlertDialogDescription>
|
||||
{t("deleteModel.single", { name: config.name })}
|
||||
</AlertDialogDescription>
|
||||
<AlertDialogFooter>
|
||||
<AlertDialogCancel>
|
||||
{t("button.cancel", { ns: "common" })}
|
||||
</AlertDialogCancel>
|
||||
<AlertDialogAction
|
||||
className={buttonVariants({ variant: "destructive" })}
|
||||
onClick={handleDelete}
|
||||
>
|
||||
{t("button.delete", { ns: "common" })}
|
||||
</AlertDialogAction>
|
||||
</AlertDialogFooter>
|
||||
</AlertDialogContent>
|
||||
</AlertDialog>
|
||||
|
||||
<div
|
||||
className={cn(
|
||||
"relative aspect-square w-full cursor-pointer overflow-hidden rounded-lg",
|
||||
)}
|
||||
onClick={onClick}
|
||||
>
|
||||
<img
|
||||
className="size-full"
|
||||
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
|
||||
/>
|
||||
<ImageShadowOverlay lowerClassName="h-[30%] z-0" />
|
||||
<div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
|
||||
{config.name}
|
||||
</div>
|
||||
<div className="absolute bottom-2 right-2 z-40">
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild onClick={(e) => e.stopPropagation()}>
|
||||
<BlurredIconButton>
|
||||
<FiMoreVertical className="size-5 text-white" />
|
||||
</BlurredIconButton>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent
|
||||
align="end"
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
>
|
||||
<DropdownMenuItem onClick={handleDeleteClick}>
|
||||
<LuTrash2 className="mr-2 size-4" />
|
||||
<span>{t("button.delete", { ns: "common" })}</span>
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
@ -893,7 +893,7 @@ function ObjectTrainGrid({
|
||||
// selection
|
||||
|
||||
const [selectedEvent, setSelectedEvent] = useState<Event>();
|
||||
const [dialogTab, setDialogTab] = useState<SearchTab>("details");
|
||||
const [dialogTab, setDialogTab] = useState<SearchTab>("snapshot");
|
||||
|
||||
// handlers
|
||||
|
||||
|
||||
@ -214,7 +214,7 @@ export default function SearchView({
|
||||
// detail
|
||||
|
||||
const [searchDetail, setSearchDetail] = useState<SearchResult>();
|
||||
const [page, setPage] = useState<SearchTab>("details");
|
||||
const [page, setPage] = useState<SearchTab>("snapshot");
|
||||
|
||||
// search interaction
|
||||
|
||||
@ -222,7 +222,7 @@ export default function SearchView({
|
||||
const itemRefs = useRef<(HTMLDivElement | null)[]>([]);
|
||||
|
||||
const onSelectSearch = useCallback(
|
||||
(item: SearchResult, ctrl: boolean, page: SearchTab = "details") => {
|
||||
(item: SearchResult, ctrl: boolean, page: SearchTab = "snapshot") => {
|
||||
if (selectedObjects.length > 1 || ctrl) {
|
||||
const index = selectedObjects.indexOf(item.id);
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user