Merge branch 'dev' into dev-zone-friendly-name
commit f52f273e4c
@@ -39,6 +39,26 @@ Each installation and even camera can have different parameters for what is cons
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
```

### Image Source

By default, review summaries use preview images (cached preview frames), which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution:

```yaml
review:
  genai:
    enabled: true
    image_source: recordings # Options: "preview" (default) or "recordings"
```

When using `recordings`, frames are extracted at 480p resolution (480px height), providing better detail for the LLM while staying mindful of context window size. This is particularly useful where fine details matter, such as identifying license plates, reading text, or analyzing distant objects. Note that using recordings will:

- Provide higher quality images to the LLM (480p vs 180p preview images)
- Use more tokens per image (~200-300 tokens vs ~100 tokens for preview)
- Result in fewer frames being sent to stay within context limits (typically 6-12 frames vs 8-20 frames)
- Require that recordings are enabled for the camera

If recordings are not available for a given time period, the system automatically falls back to preview frames.
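As a rough illustration of the tradeoff (the per-image token costs are the approximations above; the frame counts are the ones this commit's `calculate_frame_count` selects for a model with an ~8k-token context):

```python
# Illustrative arithmetic only -- the token costs are rough estimates.
preview_tokens, recording_tokens = 100, 250

frames_preview = 16     # frames selected for an 8k context with preview images
frames_recordings = 10  # frames selected for an 8k context with recording frames

print(frames_preview * preview_tokens)       # ~1600 tokens of image input
print(frames_recordings * recording_tokens)  # ~2500 tokens of image input
```

Either way, the image budget stays well under the context size, leaving room for the prompt and the response.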
### Additional Concerns

Along with suspicious activity or immediate threats, you may have other concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:

@@ -429,6 +429,10 @@ review:
    alerts: True
    # Optional: Enable GenAI review summaries for detections (default: shown below)
    detections: False
    # Optional: Image source for GenAI (default: preview)
    # Options: "preview" (uses cached preview frames at 180p) or "recordings" (extracts frames from recordings at 480p)
    # Using "recordings" provides better image quality but uses ~2-3x more tokens per image (~200-300 vs ~100 tokens)
    image_source: preview
    # Optional: Additional concerns that the GenAI should make note of (default: None)
    additional_concerns:
      - Animals in the garden
@@ -1,10 +1,18 @@
from enum import Enum
from typing import Optional, Union

from pydantic import Field, field_validator

from ..base import FrigateBaseModel

__all__ = ["ReviewConfig", "DetectionsConfig", "AlertsConfig"]
__all__ = ["ReviewConfig", "DetectionsConfig", "AlertsConfig", "ImageSourceEnum"]


class ImageSourceEnum(str, Enum):
    """Image source options for GenAI Review."""

    preview = "preview"
    recordings = "recordings"


DEFAULT_ALERT_OBJECTS = ["person", "car"]

@@ -77,6 +85,10 @@ class GenAIReviewConfig(FrigateBaseModel):
    )
    alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
    detections: bool = Field(default=False, title="Enable GenAI for detections.")
    image_source: ImageSourceEnum = Field(
        default=ImageSourceEnum.preview,
        title="Image source for review descriptions.",
    )
    additional_concerns: list[str] = Field(
        default=[],
        title="Additional concerns that GenAI should make note of on this camera.",
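A minimal sketch of how the new field behaves at validation time; plain pydantic v2 `BaseModel` stands in for `FrigateBaseModel` here, assuming the base class keeps pydantic's default coercion:

```python
from enum import Enum

from pydantic import BaseModel, Field


class ImageSourceEnum(str, Enum):
    preview = "preview"
    recordings = "recordings"


class GenAIReviewConfig(BaseModel):
    image_source: ImageSourceEnum = Field(default=ImageSourceEnum.preview)


# The plain YAML strings "preview"/"recordings" coerce to enum members.
assert GenAIReviewConfig().image_source is ImageSourceEnum.preview
assert (
    GenAIReviewConfig(image_source="recordings").image_source
    is ImageSourceEnum.recordings
)
```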
@@ -3,6 +3,7 @@
import copy
import datetime
import logging
import math
import os
import shutil
import threading
@@ -10,16 +11,18 @@ from pathlib import Path
from typing import Any

import cv2
from peewee import DoesNotExist

from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.config.camera.review import GenAIReviewConfig
from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
from frigate.data_processing.types import PostProcessDataEnum
from frigate.genai import GenAIClient
from frigate.models import ReviewSegment
from frigate.models import Recordings, ReviewSegment
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.image import get_image_from_recording

from ..post.api import PostProcessorApi
from ..types import DataProcessorMetrics
@@ -43,20 +46,35 @@ class ReviewDescriptionProcessor(PostProcessorApi):
        self.review_descs_dps = EventsPerSecond()
        self.review_descs_dps.start()

    def calculate_frame_count(self) -> int:
        """Calculate optimal number of frames based on context size."""
        # With our preview images (height of 180px) each image should be ~100 tokens per image
        # We want to be conservative to not have too long of query times with too many images
    def calculate_frame_count(
        self, image_source: ImageSourceEnum = ImageSourceEnum.preview
    ) -> int:
        """Calculate optimal number of frames based on context size and image source."""
        context_size = self.genai_client.get_context_size()

        if context_size > 10000:
            return 20
        elif context_size > 6000:
            return 16
        elif context_size > 4000:
            return 12
        if image_source == ImageSourceEnum.recordings:
            # With recordings at 480p resolution (480px height), each image uses ~200-300 tokens
            # This is ~2-3x more than preview images, so we reduce frame count accordingly
            # to avoid exceeding context limits and maintain reasonable inference times
            if context_size > 10000:
                return 12
            elif context_size > 6000:
                return 10
            elif context_size > 4000:
                return 8
            else:
                return 6
        else:
            return 8
        # With preview images (180px height), each image uses ~100 tokens
        # We can send more frames since they're lower resolution
        if context_size > 10000:
            return 20
        elif context_size > 6000:
            return 16
        elif context_size > 4000:
            return 12
        else:
            return 8

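The two tiers are easiest to read side by side. A standalone sketch of the same thresholds (a restatement for illustration, not Frigate code):

```python
def frame_budget(context_size: int, recordings: bool) -> int:
    """Tiers as (threshold, recordings_frames, preview_frames)."""
    tiers = [(10000, 12, 20), (6000, 10, 16), (4000, 8, 12)]
    for threshold, rec_frames, preview_frames in tiers:
        if context_size > threshold:
            return rec_frames if recordings else preview_frames
    return 6 if recordings else 8


assert frame_budget(8000, recordings=True) == 10
assert frame_budget(8000, recordings=False) == 16
assert frame_budget(3000, recordings=True) == 6
```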
    def process_data(self, data, data_type):
        self.metrics.review_desc_dps.value = self.review_descs_dps.eps()
@@ -88,36 +106,50 @@ class ReviewDescriptionProcessor(PostProcessorApi):
        ):
            return

        frames = self.get_cache_frames(
            camera, final_data["start_time"], final_data["end_time"]
        )
        image_source = camera_config.review.genai.image_source

        if not frames:
            frames = [final_data["thumb_path"]]

        thumbs = []

        for idx, thumb_path in enumerate(frames):
            thumb_data = cv2.imread(thumb_path)
            ret, jpg = cv2.imencode(
                ".jpg", thumb_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
        if image_source == ImageSourceEnum.recordings:
            thumbs = self.get_recording_frames(
                camera,
                final_data["start_time"],
                final_data["end_time"],
                height=480,  # Use 480p for good balance between quality and token usage
            )

            if ret:
                thumbs.append(jpg.tobytes())

            if camera_config.review.genai.debug_save_thumbnails:
                id = data["after"]["id"]
                Path(os.path.join(CLIPS_DIR, "genai-requests", f"{id}")).mkdir(
            if not thumbs:
                # Fallback to preview frames if no recordings available
                logger.warning(
                    f"No recording frames found for {camera}, falling back to preview frames"
                )
                thumbs = self.get_preview_frames_as_bytes(
                    camera,
                    final_data["start_time"],
                    final_data["end_time"],
                    final_data["thumb_path"],
                    id,
                    camera_config.review.genai.debug_save_thumbnails,
                )
            elif camera_config.review.genai.debug_save_thumbnails:
                # Save debug thumbnails for recordings
                Path(os.path.join(CLIPS_DIR, "genai-requests", id)).mkdir(
                    parents=True, exist_ok=True
                )
                shutil.copy(
                    thumb_path,
                    os.path.join(
                        CLIPS_DIR,
                        f"genai-requests/{id}/{idx}.webp",
                    ),
                )
                for idx, frame_bytes in enumerate(thumbs):
                    with open(
                        os.path.join(CLIPS_DIR, f"genai-requests/{id}/{idx}.jpg"),
                        "wb",
                    ) as f:
                        f.write(frame_bytes)
        else:
            # Use preview frames
            thumbs = self.get_preview_frames_as_bytes(
                camera,
                final_data["start_time"],
                final_data["end_time"],
                final_data["thumb_path"],
                id,
                camera_config.review.genai.debug_save_thumbnails,
            )

        # kickoff analysis
        self.review_descs_dps.update()
@@ -231,6 +263,122 @@ class ReviewDescriptionProcessor(PostProcessorApi):

        return selected_frames

    def get_recording_frames(
        self,
        camera: str,
        start_time: float,
        end_time: float,
        height: int = 480,
    ) -> list[bytes]:
        """Get frames from recordings at specified timestamps."""
        duration = end_time - start_time
        desired_frame_count = self.calculate_frame_count(ImageSourceEnum.recordings)

        # Calculate evenly spaced timestamps throughout the duration
        if desired_frame_count == 1:
            timestamps = [start_time + duration / 2]
        else:
            step = duration / (desired_frame_count - 1)
            timestamps = [start_time + (i * step) for i in range(desired_frame_count)]

        def extract_frame_from_recording(ts: float) -> bytes | None:
            """Extract a single frame from recording at given timestamp."""
            try:
                recording = (
                    Recordings.select(
                        Recordings.path,
                        Recordings.start_time,
                    )
                    .where((ts >= Recordings.start_time) & (ts <= Recordings.end_time))
                    .where(Recordings.camera == camera)
                    .order_by(Recordings.start_time.desc())
                    .limit(1)
                    .get()
                )

                time_in_segment = ts - recording.start_time
                return get_image_from_recording(
                    self.config.ffmpeg,
                    recording.path,
                    time_in_segment,
                    "mjpeg",
                    height=height,
                )
            except DoesNotExist:
                return None

        frames = []

        for timestamp in timestamps:
            try:
                # Try to extract frame at exact timestamp
                image_data = extract_frame_from_recording(timestamp)

                if not image_data:
                    # Try with rounded timestamp as fallback
                    rounded_timestamp = math.ceil(timestamp)
                    image_data = extract_frame_from_recording(rounded_timestamp)

                if image_data:
                    frames.append(image_data)
                else:
                    logger.warning(
                        f"No recording found for {camera} at timestamp {timestamp}"
                    )
            except Exception as e:
                logger.error(
                    f"Error extracting frame from recording for {camera} at {timestamp}: {e}"
                )
                continue

        return frames

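The even-spacing math above always pins the first frame to `start_time` and the last to `end_time`. A worked example with illustrative values (not Frigate code):

```python
# A 30-second review item with a 6-frame budget samples every 6 seconds:
start, end, count = 100.0, 130.0, 6
step = (end - start) / (count - 1)
timestamps = [start + i * step for i in range(count)]
assert timestamps == [100.0, 106.0, 112.0, 118.0, 124.0, 130.0]
```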
    def get_preview_frames_as_bytes(
        self,
        camera: str,
        start_time: float,
        end_time: float,
        thumb_path_fallback: str,
        review_id: str,
        save_debug: bool,
    ) -> list[bytes]:
        """Get preview frames and convert them to JPEG bytes.

        Args:
            camera: Camera name
            start_time: Start timestamp
            end_time: End timestamp
            thumb_path_fallback: Fallback thumbnail path if no preview frames found
            review_id: Review item ID for debug saving
            save_debug: Whether to save debug thumbnails

        Returns:
            List of JPEG image bytes
        """
        frame_paths = self.get_cache_frames(camera, start_time, end_time)
        if not frame_paths:
            frame_paths = [thumb_path_fallback]

        thumbs = []
        for idx, thumb_path in enumerate(frame_paths):
            thumb_data = cv2.imread(thumb_path)
            ret, jpg = cv2.imencode(
                ".jpg", thumb_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100]
            )
            if ret:
                thumbs.append(jpg.tobytes())

            if save_debug:
                Path(os.path.join(CLIPS_DIR, "genai-requests", review_id)).mkdir(
                    parents=True, exist_ok=True
                )
                shutil.copy(
                    thumb_path,
                    os.path.join(CLIPS_DIR, f"genai-requests/{review_id}/{idx}.webp"),
                )

        return thumbs


    @staticmethod
    def run_analysis(
@@ -254,25 +402,25 @@ def run_analysis(
        "duration": round(final_data["end_time"] - final_data["start_time"]),
    }

    objects = []
    named_objects = []
    unified_objects = []

    objects_list = final_data["data"]["objects"]
    sub_labels_list = final_data["data"]["sub_labels"]

    for i, verified_label in enumerate(final_data["data"]["verified_objects"]):
        object_type = verified_label.replace("-verified", "").replace("_", " ")
        name = sub_labels_list[i].replace("_", " ").title()
        unified_objects.append(f"{name} ({object_type})")

    # Add non-verified objects as "Unknown (type)"
    for label in objects_list:
        if "-verified" in label:
            continue
        elif label in labelmap_objects:
            objects.append(label.replace("_", " ").title())
            object_type = label.replace("_", " ")
            unified_objects.append(f"Unknown ({object_type})")

    for i, verified_label in enumerate(final_data["data"]["verified_objects"]):
        named_objects.append(
            f"{sub_labels_list[i].replace('_', ' ').title()} ({verified_label.replace('-verified', '')})"
        )

    analytics_data["objects"] = objects
    analytics_data["recognized_objects"] = named_objects
    analytics_data["unified_objects"] = unified_objects

    metadata = genai_client.generate_review_description(
        analytics_data,
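For a concrete picture of the three lists this builds, a worked example with made-up labels (it assumes `car` is present in `labelmap_objects`):

```python
# Given: verified_objects=["person-verified"], sub_labels=["joe"],
#        objects=["person-verified", "car"]
# the loops above produce:
objects = ["Car"]                                    # plain labelmap objects
recognized_objects = ["Joe (person)"]                # verified identities
unified_objects = ["Joe (person)", "Unknown (car)"]  # one line per object
```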
@@ -34,6 +34,8 @@ except ModuleNotFoundError:

logger = logging.getLogger(__name__)

MAX_OBJECT_CLASSIFICATIONS = 16


class CustomStateClassificationProcessor(RealTimeProcessorApi):
    def __init__(
@@ -396,6 +398,18 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        if obj_data.get("end_time") is not None:
            return

        if obj_data.get("stationary"):
            return

        object_id = obj_data["id"]

        if (
            object_id in self.classification_history
            and len(self.classification_history[object_id])
            >= MAX_OBJECT_CLASSIFICATIONS
        ):
            return

        now = datetime.datetime.now().timestamp()
        x, y, x2, y2 = calculate_region(
            frame.shape,
@@ -427,7 +441,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
                obj_data["id"],
                object_id,
                now,
                "unknown",
                0.0,
@@ -448,7 +462,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
                obj_data["id"],
                object_id,
                now,
                self.labelmap[best_id],
                score,
@@ -461,7 +475,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            sub_label = self.labelmap[best_id]

            consensus_label, consensus_score = self.get_weighted_score(
                obj_data["id"], sub_label, score, now
                object_id, sub_label, score, now
            )

            if consensus_label is not None:
@@ -470,7 +484,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                == ObjectClassificationType.sub_label
            ):
                self.sub_label_publisher.publish(
                    (obj_data["id"], consensus_label, consensus_score),
                    (object_id, consensus_label, consensus_score),
                    EventMetadataTypeEnum.sub_label,
                )
            elif (
@@ -479,7 +493,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            ):
                self.sub_label_publisher.publish(
                    (
                        obj_data["id"],
                        object_id,
                        self.model_config.name,
                        consensus_label,
                        consensus_score,
@@ -63,20 +63,17 @@ class GenAIClient:
        else:
            return ""

    def get_verified_object_prompt() -> str:
        if review_data["recognized_objects"]:
            object_list = " - " + "\n - ".join(review_data["recognized_objects"])
            return f"""## Verified Objects (USE THESE NAMES)
When any of the following verified objects are present in the scene, you MUST use these exact names in your title and scene description:
{object_list}
"""
    def get_objects_list() -> str:
        if review_data["unified_objects"]:
            return "\n- " + "\n- ".join(review_data["unified_objects"])
        else:
            return ""
        return "\n- (No objects detected)"

    context_prompt = f"""
Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.

## Normal Activity Patterns for This Property

{activity_context_prompt}

## Task Instructions
@@ -91,7 +88,7 @@ Your task is to provide a clear, accurate description of the scene that:
## Analysis Guidelines

When forming your description:
- **CRITICAL: Only describe objects explicitly listed in "Detected objects" below.** Do not infer or mention additional people, vehicles, or objects not present in the detected objects list, even if visual patterns suggest them. If only a car is detected, do not describe a person interacting with it unless "person" is also in the detected objects list.
- **CRITICAL: Only describe objects explicitly listed in "Objects in Scene" below.** Do not infer or mention additional people, vehicles, or objects not present in this list, even if visual patterns suggest them. If only a car is listed, do not describe a person interacting with it unless "person" is also in the objects list.
- **Only describe actions actually visible in the frames.** Do not assume or infer actions that you don't observe happening. If someone walks toward furniture but you never see them sit, do not say they sat. Stick to what you can see across the sequence.
- Describe what you observe: actions, movements, interactions with objects and the environment. Include any observable environmental changes (e.g., lighting changes triggered by activity).
- Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
@@ -103,7 +100,7 @@ When forming your description:
## Response Format

Your response MUST be a flat JSON object with:
- `title` (string): A concise, one-sentence title that captures the main activity. Include any verified recognized objects (from the "Verified recognized objects" list below) and key detected objects. Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night".
- `title` (string): A concise, one-sentence title that captures the main activity. Use the exact names from "Objects in Scene" below (e.g., if the list shows "Joe (person)" and "Unknown (person)", say "Joe and unknown person"). Examples: "Joe walking dog in backyard", "Unknown person testing car doors at night", "Joe and unknown person in driveway".
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
@@ -119,14 +116,17 @@ Your response MUST be a flat JSON object with:

- Frame 1 = earliest, Frame {len(thumbnails)} = latest
- Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
- Detected objects: {", ".join(review_data["objects"])}
- Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}

{get_verified_object_prompt()}
## Objects in Scene

Each line represents one object in the scene. Named objects are verified identities; "Unknown" indicates unverified objects of that type:
{get_objects_list()}

## Important Notes
- Values must be plain strings, floats, or integers — no nested objects, no extra commentary.
- Only describe objects from the "Detected objects" list above. Do not hallucinate additional objects.
- Only describe objects from the "Objects in Scene" list above. Do not hallucinate additional objects.
- When describing people or vehicles, use the exact names provided.
{get_language_prompt()}
"""
    logger.debug(
@@ -161,7 +161,10 @@ class GenAIClient:
        try:
            metadata = ReviewMetadata.model_validate_json(clean_json)

            if review_data["recognized_objects"]:
            if any(
                not obj.startswith("Unknown")
                for obj in review_data["unified_objects"]
            ):
                metadata.potential_threat_level = 0

            metadata.time = review_data["start"]
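In isolation, the new guard zeroes the threat level whenever at least one object in the scene carries a verified identity. A sketch of just that condition (the helper name is hypothetical):

```python
def has_verified_identity(unified_objects: list[str]) -> bool:
    # Entries not prefixed "Unknown" are verified names like "Joe (person)".
    return any(not obj.startswith("Unknown") for obj in unified_objects)


assert has_verified_identity(["Joe (person)", "Unknown (car)"]) is True
assert has_verified_identity(["Unknown (person)"]) is False
```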
@@ -52,7 +52,7 @@
    "export": "Export",
    "selectOrExport": "Select or Export",
    "toast": {
      "success": "Successfully started export. View the file in the /exports folder.",
      "success": "Successfully started export. View the file in the exports page.",
      "error": {
        "failed": "Failed to start export: {{error}}",
        "endTimeMustAfterStartTime": "End time must be after start time",

@@ -36,8 +36,8 @@
    "video": "video",
    "object_lifecycle": "object lifecycle"
  },
  "objectLifecycle": {
    "title": "Object Lifecycle",
  "trackingDetails": {
    "title": "Tracking Details",
    "noImageFound": "No image found for this timestamp.",
    "createObjectMask": "Create Object Mask",
    "adjustAnnotationSettings": "Adjust annotation settings",
@@ -168,9 +168,9 @@
    "label": "Download snapshot",
    "aria": "Download snapshot"
  },
  "viewObjectLifecycle": {
    "label": "View object lifecycle",
    "aria": "Show the object lifecycle"
  "viewTrackingDetails": {
    "label": "View tracking details",
    "aria": "Show the tracking details"
  },
  "findSimilar": {
    "label": "Find similar",
@@ -205,7 +205,7 @@
  "dialog": {
    "confirmDelete": {
      "title": "Confirm Delete",
      "desc": "Deleting this tracked object removes the snapshot, any saved embeddings, and any associated object lifecycle entries. Recorded footage of this tracked object in History view will <em>NOT</em> be deleted.<br /><br />Are you sure you want to proceed?"
      "desc": "Deleting this tracked object removes the snapshot, any saved embeddings, and any associated tracking details entries. Recorded footage of this tracked object in History view will <em>NOT</em> be deleted.<br /><br />Are you sure you want to proceed?"
    }
  },
  "noTrackedObjects": "No Tracked Objects Found",
@@ -34,7 +34,7 @@ import { toast } from "sonner";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { buttonVariants } from "../ui/button";
import { Button, buttonVariants } from "../ui/button";
import { Trans, useTranslation } from "react-i18next";
import { cn } from "@/lib/utils";

@@ -83,6 +83,11 @@ export default function ReviewCard({
        if (response.status == 200) {
          toast.success(t("export.toast.success"), {
            position: "top-center",
            action: (
              <a href="/export" target="_blank" rel="noopener noreferrer">
                <Button>View</Button>
              </a>
            ),
          });
        }
      })
@@ -13,7 +13,7 @@ type SearchThumbnailProps = {
  columns: number;
  findSimilar: () => void;
  refreshResults: () => void;
  showObjectLifecycle: () => void;
  showTrackingDetails: () => void;
  showSnapshot: () => void;
  addTrigger: () => void;
};
@@ -23,7 +23,7 @@ export default function SearchThumbnailFooter({
  columns,
  findSimilar,
  refreshResults,
  showObjectLifecycle,
  showTrackingDetails,
  showSnapshot,
  addTrigger,
}: SearchThumbnailProps) {
@@ -61,7 +61,7 @@ export default function SearchThumbnailFooter({
      searchResult={searchResult}
      findSimilar={findSimilar}
      refreshResults={refreshResults}
      showObjectLifecycle={showObjectLifecycle}
      showTrackingDetails={showTrackingDetails}
      showSnapshot={showSnapshot}
      addTrigger={addTrigger}
    />
@@ -47,7 +47,7 @@ type SearchResultActionsProps = {
  searchResult: SearchResult;
  findSimilar: () => void;
  refreshResults: () => void;
  showObjectLifecycle: () => void;
  showTrackingDetails: () => void;
  showSnapshot: () => void;
  addTrigger: () => void;
  isContextMenu?: boolean;
@@ -58,7 +58,7 @@ export default function SearchResultActions({
  searchResult,
  findSimilar,
  refreshResults,
  showObjectLifecycle,
  showTrackingDetails,
  showSnapshot,
  addTrigger,
  isContextMenu = false,
@@ -125,11 +125,11 @@ export default function SearchResultActions({
      )}
      {searchResult.data.type == "object" && (
        <MenuItem
          aria-label={t("itemMenu.viewObjectLifecycle.aria")}
          onClick={showObjectLifecycle}
          aria-label={t("itemMenu.viewTrackingDetails.aria")}
          onClick={showTrackingDetails}
        >
          <FaArrowsRotate className="mr-2 size-4" />
          <span>{t("itemMenu.viewObjectLifecycle.label")}</span>
          <span>{t("itemMenu.viewTrackingDetails.label")}</span>
        </MenuItem>
      )}
      {config?.semantic_search?.enabled && isContextMenu && (
@@ -95,6 +95,11 @@ export default function ExportDialog({
      if (response.status == 200) {
        toast.success(t("export.toast.success"), {
          position: "top-center",
          action: (
            <a href="/export" target="_blank" rel="noopener noreferrer">
              <Button>View</Button>
            </a>
          ),
        });
        setName("");
        setRange(undefined);
@@ -104,6 +104,11 @@ export default function MobileReviewSettingsDrawer({
          t("export.toast.success", { ns: "components/dialog" }),
          {
            position: "top-center",
            action: (
              <a href="/export" target="_blank" rel="noopener noreferrer">
                <Button>View</Button>
              </a>
            ),
          },
        );
        setName("");
@@ -1,5 +1,5 @@
import { useMemo, useCallback } from "react";
import { ObjectLifecycleSequence, LifecycleClassType } from "@/types/timeline";
import { TrackingDetailsSequence, LifecycleClassType } from "@/types/timeline";
import { FrigateConfig } from "@/types/frigateConfig";
import useSWR from "swr";
import { useDetailStream } from "@/context/detail-stream-context";
@@ -28,7 +28,7 @@ type PathPoint = {
  x: number;
  y: number;
  timestamp: number;
  lifecycle_item?: ObjectLifecycleSequence;
  lifecycle_item?: TrackingDetailsSequence;
  objectId: string;
};
@@ -64,7 +64,7 @@ export default function ObjectTrackOverlay({
  );

  // Fetch timeline data for each object ID using fixed number of hooks
  const { data: timelineData } = useSWR<ObjectLifecycleSequence[]>(
  const { data: timelineData } = useSWR<TrackingDetailsSequence[]>(
    selectedObjectIds.length > 0
      ? `timeline?source_id=${selectedObjectIds.join(",")}&limit=1000`
      : null,
@@ -78,7 +78,7 @@ export default function ObjectTrackOverlay({
  const timelineResults = useMemo(() => {
    if (!timelineData) return selectedObjectIds.map(() => []);

    const grouped: Record<string, ObjectLifecycleSequence[]> = {};
    const grouped: Record<string, TrackingDetailsSequence[]> = {};
    for (const entry of timelineData) {
      if (!grouped[entry.source_id]) {
        grouped[entry.source_id] = [];
@@ -166,9 +166,9 @@ export default function ObjectTrackOverlay({
  const eventSequencePoints: PathPoint[] =
    timelineData
      ?.filter(
        (event: ObjectLifecycleSequence) => event.data.box !== undefined,
        (event: TrackingDetailsSequence) => event.data.box !== undefined,
      )
      .map((event: ObjectLifecycleSequence) => {
      .map((event: TrackingDetailsSequence) => {
        const [left, top, width, height] = event.data.box!;
        return {
          x: left + width / 2, // Center x
@@ -197,22 +197,22 @@ export default function ObjectTrackOverlay({
  const currentZones =
    timelineData
      ?.filter(
        (event: ObjectLifecycleSequence) =>
        (event: TrackingDetailsSequence) =>
          event.timestamp <= effectiveCurrentTime,
      )
      .sort(
        (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
        (a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
          b.timestamp - a.timestamp,
      )[0]?.data?.zones || [];

  // Get current bounding box
  const currentBox = timelineData
    ?.filter(
      (event: ObjectLifecycleSequence) =>
      (event: TrackingDetailsSequence) =>
        event.timestamp <= effectiveCurrentTime && event.data.box,
    )
    .sort(
      (a: ObjectLifecycleSequence, b: ObjectLifecycleSequence) =>
      (a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
        b.timestamp - a.timestamp,
    )[0]?.data?.box;
@@ -40,7 +40,7 @@ export default function AnnotationOffsetSlider({ className }: Props) {
      );

      toast.success(
        t("objectLifecycle.annotationSettings.offset.toast.success", {
        t("trackingDetails.annotationSettings.offset.toast.success", {
          camera,
        }),
        { position: "top-center" },
@@ -79,7 +79,7 @@ export function AnnotationSettingsPane({
      .then((res) => {
        if (res.status === 200) {
          toast.success(
            t("objectLifecycle.annotationSettings.offset.toast.success", {
            t("trackingDetails.annotationSettings.offset.toast.success", {
              camera: event?.camera,
            }),
            {
@@ -142,7 +142,7 @@ export function AnnotationSettingsPane({
  return (
    <div className="mb-3 space-y-3 rounded-lg border border-secondary-foreground bg-background_alt p-2">
      <Heading as="h4" className="my-2">
        {t("objectLifecycle.annotationSettings.title")}
        {t("trackingDetails.annotationSettings.title")}
      </Heading>
      <div className="flex flex-col">
        <div className="flex flex-row items-center justify-start gap-2 p-3">
@@ -152,11 +152,11 @@ export function AnnotationSettingsPane({
            onCheckedChange={setShowZones}
          />
          <Label className="cursor-pointer" htmlFor="show-zones">
            {t("objectLifecycle.annotationSettings.showAllZones.title")}
            {t("trackingDetails.annotationSettings.showAllZones.title")}
          </Label>
        </div>
        <div className="text-sm text-muted-foreground">
          {t("objectLifecycle.annotationSettings.showAllZones.desc")}
          {t("trackingDetails.annotationSettings.showAllZones.desc")}
        </div>
      </div>
      <Separator className="my-2 flex bg-secondary" />
@@ -171,14 +171,14 @@ export function AnnotationSettingsPane({
            render={({ field }) => (
              <FormItem>
                <FormLabel>
                  {t("objectLifecycle.annotationSettings.offset.label")}
                  {t("trackingDetails.annotationSettings.offset.label")}
                </FormLabel>
                <div className="flex flex-col gap-3 md:flex-row-reverse md:gap-8">
                  <div className="flex flex-row items-center gap-3 rounded-lg bg-destructive/50 p-3 text-sm text-primary-variant md:my-5">
                    <PiWarningCircle className="size-24" />
                    <div>
                      <Trans ns="views/explore">
                        objectLifecycle.annotationSettings.offset.desc
                        trackingDetails.annotationSettings.offset.desc
                      </Trans>
                      <div className="mt-2 flex items-center text-primary">
                        <Link
@@ -203,10 +203,10 @@ export function AnnotationSettingsPane({
                </FormControl>
                <FormDescription>
                  <Trans ns="views/explore">
                    objectLifecycle.annotationSettings.offset.millisecondsToOffset
                    trackingDetails.annotationSettings.offset.millisecondsToOffset
                  </Trans>
                  <div className="mt-2">
                    {t("objectLifecycle.annotationSettings.offset.tips")}
                    {t("trackingDetails.annotationSettings.offset.tips")}
                  </div>
                </FormDescription>
              </div>
@@ -123,7 +123,7 @@ export function ObjectPath({
          <Trans>
            {pos.lifecycle_item
              ? getLifecycleItemDescription(pos.lifecycle_item)
              : t("objectLifecycle.trackedPoint")}
              : t("trackingDetails.trackedPoint")}
          </Trans>
        </TooltipContent>
      </TooltipPortal>
@@ -20,7 +20,7 @@ import { Event } from "@/types/event";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { cn } from "@/lib/utils";
import { FrigatePlusDialog } from "../dialog/FrigatePlusDialog";
import ObjectLifecycle from "./ObjectLifecycle";
import TrackingDetails from "./TrackingDetails";
import Chip from "@/components/indicators/Chip";
import { FaDownload, FaImages, FaShareAlt } from "react-icons/fa";
import FrigatePlusIcon from "@/components/icons/FrigatePlusIcon";
@@ -411,7 +411,7 @@ export default function ReviewDetailDialog({

        {pane == "details" && selectedEvent && (
          <div className="mt-0 flex size-full flex-col gap-2">
            <ObjectLifecycle event={selectedEvent} setPane={setPane} />
            <TrackingDetails event={selectedEvent} setPane={setPane} />
          </div>
        )}
      </Content>
@@ -544,7 +544,7 @@ function EventItem({
            </Chip>
          </TooltipTrigger>
          <TooltipContent>
            {t("itemMenu.viewObjectLifecycle.label")}
            {t("itemMenu.viewTrackingDetails.label")}
          </TooltipContent>
        </Tooltip>
      )}
@@ -34,8 +34,7 @@ import {
  FaRegListAlt,
  FaVideo,
} from "react-icons/fa";
import { FaRotate } from "react-icons/fa6";
import ObjectLifecycle from "./ObjectLifecycle";
import TrackingDetails from "./TrackingDetails";
import {
  MobilePage,
  MobilePageContent,
@@ -80,12 +79,13 @@ import FaceSelectionDialog from "../FaceSelectionDialog";
import { getTranslatedLabel } from "@/utils/i18n";
import { CgTranscript } from "react-icons/cg";
import { CameraNameLabel } from "@/components/camera/FriendlyNameLabel";
import { PiPath } from "react-icons/pi";

const SEARCH_TABS = [
  "details",
  "snapshot",
  "video",
  "object_lifecycle",
  "tracking_details",
] as const;
export type SearchTab = (typeof SEARCH_TABS)[number];

@@ -160,7 +160,7 @@ export default function SearchDetailDialog({
    }

    if (search.data.type != "object" || !search.has_clip) {
      const index = views.indexOf("object_lifecycle");
      const index = views.indexOf("tracking_details");
      views.splice(index, 1);
    }

@@ -235,9 +235,7 @@ export default function SearchDetailDialog({
              {item == "details" && <FaRegListAlt className="size-4" />}
              {item == "snapshot" && <FaImage className="size-4" />}
              {item == "video" && <FaVideo className="size-4" />}
              {item == "object_lifecycle" && (
                <FaRotate className="size-4" />
              )}
              {item == "tracking_details" && <PiPath className="size-4" />}
              <div className="smart-capitalize">{t(`type.${item}`)}</div>
            </ToggleGroupItem>
          ))}
@@ -268,8 +266,8 @@ export default function SearchDetailDialog({
          />
        )}
        {page == "video" && <VideoTab search={search} />}
        {page == "object_lifecycle" && (
          <ObjectLifecycle
        {page == "tracking_details" && (
          <TrackingDetails
            className="w-full overflow-x-hidden"
            event={search as unknown as Event}
            fullscreen={true}
@@ -3,7 +3,7 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { Event } from "@/types/event";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { Button } from "@/components/ui/button";
import { ObjectLifecycleSequence } from "@/types/timeline";
import { TrackingDetailsSequence } from "@/types/timeline";
import Heading from "@/components/ui/heading";
import { ReviewDetailPaneType } from "@/types/review";
import { FrigateConfig } from "@/types/frigateConfig";
@@ -41,6 +41,13 @@ import {
  ContextMenuItem,
  ContextMenuTrigger,
} from "@/components/ui/context-menu";
import {
  DropdownMenu,
  DropdownMenuTrigger,
  DropdownMenuContent,
  DropdownMenuItem,
  DropdownMenuPortal,
} from "@/components/ui/dropdown-menu";
import { Link, useNavigate } from "react-router-dom";
import { ObjectPath } from "./ObjectPath";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
@@ -49,23 +56,27 @@ import { Trans, useTranslation } from "react-i18next";
import { getTranslatedLabel } from "@/utils/i18n";
import { resolveZoneName } from "@/hooks/use-zone-friendly-name";
import { Badge } from "@/components/ui/badge";
import { HiDotsHorizontal } from "react-icons/hi";
import axios from "axios";
import { toast } from "sonner";

type ObjectLifecycleProps = {
type TrackingDetailsProps = {
  className?: string;
  event: Event;
  fullscreen?: boolean;
  setPane: React.Dispatch<React.SetStateAction<ReviewDetailPaneType>>;
};

export default function ObjectLifecycle({
export default function TrackingDetails({
  className,
  event,
  fullscreen = false,
  setPane,
}: ObjectLifecycleProps) {
}: TrackingDetailsProps) {
  const { t } = useTranslation(["views/explore"]);
  const { data: config } = useSWR<FrigateConfig>("config");
  const { data: eventSequence } = useSWR<ObjectLifecycleSequence[]>([

  const { data: eventSequence } = useSWR<TrackingDetailsSequence[]>([
    "timeline",
    {
      source_id: event.id,
@@ -453,7 +464,7 @@ ...
          <div className="relative aspect-video">
            <div className="flex flex-col items-center justify-center p-20 text-center">
              <LuFolderX className="size-16" />
              {t("objectLifecycle.noImageFound")}
              {t("trackingDetails.noImageFound")}
            </div>
          </div>
        )}
@@ -564,7 +575,7 @@ ...
                    }
                  >
                    <div className="text-primary">
                      {t("objectLifecycle.createObjectMask")}
                      {t("trackingDetails.createObjectMask")}
                    </div>
                  </div>
                </ContextMenuItem>
@@ -574,7 +585,7 @@ ...
      </div>

      <div className="mt-3 flex flex-row items-center justify-between">
        <Heading as="h4">{t("objectLifecycle.title")}</Heading>
        <Heading as="h4">{t("trackingDetails.title")}</Heading>

        <div className="flex flex-row gap-2">
          <Tooltip>
@@ -582,7 +593,7 @@ ...
              <Button
                variant={showControls ? "select" : "default"}
                className="size-7 p-1.5"
                aria-label={t("objectLifecycle.adjustAnnotationSettings")}
                aria-label={t("trackingDetails.adjustAnnotationSettings")}
              >
                <LuSettings
                  className="size-5"
@@ -592,7 +603,7 @@ ...
            </TooltipTrigger>
            <TooltipPortal>
              <TooltipContent>
                {t("objectLifecycle.adjustAnnotationSettings")}
                {t("trackingDetails.adjustAnnotationSettings")}
              </TooltipContent>
            </TooltipPortal>
          </Tooltip>
@@ -600,10 +611,10 @@ ...
      </div>
      <div className="flex flex-row items-center justify-between">
        <div className="mb-2 text-sm text-muted-foreground">
          {t("objectLifecycle.scrollViewTips")}
          {t("trackingDetails.scrollViewTips")}
        </div>
        <div className="min-w-20 text-right text-sm text-muted-foreground">
          {t("objectLifecycle.count", {
          {t("trackingDetails.count", {
            first: selectedIndex + 1,
            second: eventSequence?.length ?? 0,
          })}
@@ -611,7 +622,7 @@ ...
      </div>
      {config?.cameras[event.camera]?.onvif.autotracking.enabled_in_config && (
        <div className="-mt-2 mb-2 text-sm text-danger">
          {t("objectLifecycle.autoTrackingTips")}
          {t("trackingDetails.autoTrackingTips")}
        </div>
      )}
      {showControls && (
@@ -762,7 +773,7 @@ ...
}

type GetTimelineIconParams = {
  lifecycleItem: ObjectLifecycleSequence;
  lifecycleItem: TrackingDetailsSequence;
  className?: string;
};

@@ -800,7 +811,7 @@ export function LifecycleIcon({
}

type LifecycleIconRowProps = {
  item: ObjectLifecycleSequence;
  item: TrackingDetailsSequence;
  isActive?: boolean;
  formattedEventTimestamp: string;
  ratio: string;
@@ -822,7 +833,11 @@ function LifecycleIconRow({
  setSelectedZone,
  getZoneColor,
}: LifecycleIconRowProps) {
  const { t } = useTranslation(["views/explore"]);
  const { t } = useTranslation(["views/explore", "components/player"]);
  const { data: config } = useSWR<FrigateConfig>("config");
  const [isOpen, setIsOpen] = useState(false);

  const navigate = useNavigate();

  return (
    <div
@@ -852,13 +867,13 @@ ...
        <div className="mt-1 flex flex-wrap items-center gap-2 text-xs text-secondary-foreground md:gap-5">
          <div className="flex items-center gap-1">
            <span className="text-primary-variant">
              {t("objectLifecycle.lifecycleItemDesc.header.ratio")}
              {t("trackingDetails.lifecycleItemDesc.header.ratio")}
            </span>
            <span className="font-medium text-primary">{ratio}</span>
          </div>
          <div className="flex items-center gap-1">
            <span className="text-primary-variant">
              {t("objectLifecycle.lifecycleItemDesc.header.area")}
              {t("trackingDetails.lifecycleItemDesc.header.area")}
            </span>
            {areaPx !== undefined && areaPct !== undefined ? (
              <span className="font-medium text-primary">
@@ -910,7 +925,69 @@ ...
          </div>
        </div>
        <div className="ml-3 flex-shrink-0 px-1 text-right text-xs text-primary-variant">
          <div className="whitespace-nowrap">{formattedEventTimestamp}</div>
          <div className="flex flex-row items-center gap-3">
            <div className="whitespace-nowrap">{formattedEventTimestamp}</div>
            {(config?.plus?.enabled || item.data.box) && (
              <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
                <DropdownMenuTrigger>
                  <div className="rounded p-1 pr-2" role="button">
                    <HiDotsHorizontal className="size-4 text-muted-foreground" />
                  </div>
                </DropdownMenuTrigger>
                <DropdownMenuPortal>
                  <DropdownMenuContent>
                    {config?.plus?.enabled && (
                      <DropdownMenuItem
                        className="cursor-pointer"
                        onSelect={async () => {
                          const resp = await axios.post(
                            `/${item.camera}/plus/${item.timestamp}`,
                          );

                          if (resp && resp.status == 200) {
                            toast.success(
                              t("toast.success.submittedFrigatePlus", {
                                ns: "components/player",
                              }),
                              {
                                position: "top-center",
                              },
                            );
                          } else {
                            toast.error(
                              t("toast.error.submitFrigatePlusFailed", {
                                ns: "components/player",
                              }),
                              {
                                position: "top-center",
                              },
                            );
                          }
                        }}
                      >
                        {t("itemMenu.submitToPlus.label")}
                      </DropdownMenuItem>
                    )}
                    {item.data.box && (
                      <DropdownMenuItem
                        className="cursor-pointer"
                        onSelect={() => {
                          setIsOpen(false);
                          setTimeout(() => {
                            navigate(
                              `/settings?page=masksAndZones&camera=${item.camera}&object_mask=${item.data.box}`,
                            );
                          }, 0);
                        }}
                      >
                        {t("trackingDetails.createObjectMask")}
                      </DropdownMenuItem>
                    )}
                  </DropdownMenuContent>
                </DropdownMenuPortal>
              </DropdownMenu>
            )}
          </div>
        </div>
      </div>
    </div>
@@ -1,7 +1,7 @@
import { Recording } from "@/types/record";
import { DynamicPlayback } from "@/types/playback";
import { PreviewController } from "../PreviewPlayer";
import { TimeRange, ObjectLifecycleSequence } from "@/types/timeline";
import { TimeRange, TrackingDetailsSequence } from "@/types/timeline";
import { calculateInpointOffset } from "@/utils/videoUtil";

type PlayerMode = "playback" | "scrubbing";
@@ -12,7 +12,7 @@ export class DynamicVideoController {
  private playerController: HTMLVideoElement;
  private previewController: PreviewController;
  private setNoRecording: (noRecs: boolean) => void;
  private setFocusedItem: (timeline: ObjectLifecycleSequence) => void;
  private setFocusedItem: (timeline: TrackingDetailsSequence) => void;
  private playerMode: PlayerMode = "playback";

  // playback
@@ -29,7 +29,7 @@ ...
    annotationOffset: number,
    defaultMode: PlayerMode,
    setNoRecording: (noRecs: boolean) => void,
    setFocusedItem: (timeline: ObjectLifecycleSequence) => void,
    setFocusedItem: (timeline: TrackingDetailsSequence) => void,
  ) {
    this.camera = camera;
    this.playerController = playerController;
@@ -132,7 +132,7 @@ ...
    });
  }

  seekToTimelineItem(timeline: ObjectLifecycleSequence) {
  seekToTimelineItem(timeline: TrackingDetailsSequence) {
    this.playerController.pause();
    this.seekToTimestamp(timeline.timestamp + this.annotationOffset);
    this.setFocusedItem(timeline);
@@ -1,5 +1,5 @@
import { useEffect, useMemo, useRef, useState } from "react";
import { ObjectLifecycleSequence } from "@/types/timeline";
import { TrackingDetailsSequence } from "@/types/timeline";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
import { useDetailStream } from "@/context/detail-stream-context";
import scrollIntoView from "scroll-into-view-if-needed";
@@ -431,7 +431,8 @@ function EventList({
}: EventListProps) {
  const { data: config } = useSWR<FrigateConfig>("config");

  const { selectedObjectIds, toggleObjectSelection } = useDetailStream();
  const { selectedObjectIds, setSelectedObjectIds, toggleObjectSelection } =
    useDetailStream();

  const isSelected = selectedObjectIds.includes(event.id);

@@ -439,13 +440,19 @@ ...

  const handleObjectSelect = (event: Event | undefined) => {
    if (event) {
      // onSeek(event.start_time ?? 0);
      toggleObjectSelection(event.id);
      setSelectedObjectIds([]);
      setSelectedObjectIds([event.id]);
      onSeek(event.start_time);
    } else {
      toggleObjectSelection(undefined);
      setSelectedObjectIds([]);
    }
  };

  const handleTimelineClick = (ts: number, play?: boolean) => {
    handleObjectSelect(event);
    onSeek(ts, play);
  };

  // Clear selection when effectiveTime has passed this event's end_time
  useEffect(() => {
    if (isSelected && effectiveTime && event.end_time) {
@@ -469,11 +476,6 @@ ...
          isSelected
            ? "bg-secondary-highlight"
            : "outline-transparent duration-500",
          !isSelected &&
            (effectiveTime ?? 0) >= (event.start_time ?? 0) - 0.5 &&
            (effectiveTime ?? 0) <=
              (event.end_time ?? event.start_time ?? 0) + 0.5 &&
            "bg-secondary-highlight",
        )}
      >
        <div className="ml-1.5 flex w-full items-end justify-between">
@@ -481,12 +483,18 @@ ...
          <div
            className={cn(
              "relative rounded-full p-1 text-white",
              isSelected ? "bg-selected" : "bg-muted-foreground",
              (effectiveTime ?? 0) >= (event.start_time ?? 0) - 0.5 &&
                (effectiveTime ?? 0) <=
                  (event.end_time ?? event.start_time ?? 0) + 0.5
                ? "bg-selected"
                : "bg-muted-foreground",
            )}
            onClick={(e) => {
              e.stopPropagation();
              handleObjectSelect(isSelected ? undefined : event);
              onSeek(event.start_time);
              handleObjectSelect(event);
            }}
            role="button"
          >
            {getIconForLabel(
              event.sub_label ? event.label + "-verified" : event.label,
@@ -497,7 +505,8 @@ ...
            className="flex flex-1 items-center gap-2"
            onClick={(e) => {
              e.stopPropagation();
              onSeek(event.start_time ?? 0);
              onSeek(event.start_time);
              handleObjectSelect(event);
            }}
            role="button"
          >
@@ -533,8 +542,10 @@ ...
          <div className="mt-2">
            <ObjectTimeline
              eventId={event.id}
              onSeek={onSeek}
              onSeek={handleTimelineClick}
              effectiveTime={effectiveTime}
              startTime={event.start_time}
              endTime={event.end_time}
            />
          </div>
        </div>
@@ -543,10 +554,11 @@ ...
}

type LifecycleItemProps = {
  item: ObjectLifecycleSequence;
  item: TrackingDetailsSequence;
  isActive?: boolean;
  onSeek?: (timestamp: number, play?: boolean) => void;
  effectiveTime?: number;
  isTimelineActive?: boolean;
};

function LifecycleItem({
@@ -554,6 +566,7 @@ ...
  isActive,
  onSeek,
  effectiveTime,
  isTimelineActive = false,
}: LifecycleItemProps) {
  const { t } = useTranslation("views/events");
  const { data: config } = useSWR<FrigateConfig>("config");
@@ -616,7 +629,7 @@ ...
      <div
        role="button"
        onClick={() => {
          onSeek?.(item.timestamp ?? 0, false);
          onSeek?.(item.timestamp, false);
        }}
        className={cn(
          "flex cursor-pointer items-center gap-2 text-sm text-primary-variant",
@@ -628,8 +641,9 @@ ...
        <div className="relative flex size-4 items-center justify-center">
          <LuCircle
            className={cn(
              "relative z-10 ml-[1px] size-2.5 fill-secondary-foreground stroke-none",
              "relative z-10 size-2.5 fill-secondary-foreground stroke-none",
              (isActive || (effectiveTime ?? 0) >= (item?.timestamp ?? 0)) &&
                isTimelineActive &&
                "fill-selected duration-300",
            )}
          />
@@ -647,14 +661,14 @@ ...
          <div className="flex flex-col gap-1">
            <div className="flex items-start gap-1">
              <span className="text-muted-foreground">
                {t("objectLifecycle.lifecycleItemDesc.header.ratio")}
                {t("trackingDetails.lifecycleItemDesc.header.ratio")}
              </span>
              <span className="font-medium text-foreground">{ratio}</span>
            </div>

            <div className="flex items-start gap-1">
              <span className="text-muted-foreground">
                {t("objectLifecycle.lifecycleItemDesc.header.area")}
                {t("trackingDetails.lifecycleItemDesc.header.area")}
              </span>
              {areaPx !== undefined && areaPct !== undefined ? (
                <span className="font-medium text-foreground">
@@ -684,13 +698,17 @@ function ObjectTimeline({
  eventId,
  onSeek,
  effectiveTime,
  startTime,
  endTime,
}: {
  eventId: string;
  onSeek: (ts: number, play?: boolean) => void;
  effectiveTime?: number;
  startTime?: number;
  endTime?: number;
}) {
  const { t } = useTranslation("views/events");
  const { data: timeline, isValidating } = useSWR<ObjectLifecycleSequence[]>([
  const { data: timeline, isValidating } = useSWR<TrackingDetailsSequence[]>([
    "timeline",
    {
      source_id: eventId,
@@ -709,9 +727,17 @@ ...
    );
  }

  // Check if current time is within the event's start/stop range
  const isWithinEventRange =
    effectiveTime !== undefined &&
    startTime !== undefined &&
    endTime !== undefined &&
    effectiveTime >= startTime &&
    effectiveTime <= endTime;

  // Calculate how far down the blue line should extend based on effectiveTime
  const calculateLineHeight = () => {
    if (!timeline || timeline.length === 0) return 0;
    if (!timeline || timeline.length === 0 || !isWithinEventRange) return 0;

    const currentTime = effectiveTime ?? 0;

@@ -753,15 +779,19 @@ ...
    );
  };

  const blueLineHeight = calculateLineHeight();
  const activeLineHeight = calculateLineHeight();

  return (
    <div className="-pb-2 relative mx-2">
      <div className="absolute -top-2 bottom-2 left-2 z-0 w-0.5 -translate-x-1/2 bg-secondary-foreground" />
      <div
        className="absolute left-2 top-2 z-[5] max-h-[calc(100%-1rem)] w-0.5 -translate-x-1/2 bg-selected transition-all duration-300"
        style={{ height: `${blueLineHeight}%` }}
      />
      {isWithinEventRange && (
        <div
          className={cn(
            "absolute left-2 top-2 z-[5] max-h-[calc(100%-1rem)] w-0.5 -translate-x-1/2 bg-selected transition-all duration-300",
          )}
          style={{ height: `${activeLineHeight}%` }}
        />
      )}
      <div className="space-y-2">
        {timeline.map((event, idx) => {
          const isActive =
@@ -774,6 +804,7 @@ ...
              onSeek={onSeek}
              isActive={isActive}
              effectiveTime={effectiveTime}
              isTimelineActive={isWithinEventRange}
            />
          );
        })}

@@ -212,13 +212,13 @@ const CarouselPrevious = React.forwardRef<
        : "-top-12 left-1/2 -translate-x-1/2 rotate-90",
      className,
    )}
    aria-label={t("objectLifecycle.carousel.previous")}
    aria-label={t("trackingDetails.carousel.previous")}
    disabled={!canScrollPrev}
    onClick={scrollPrev}
    {...props}
  >
    <ArrowLeft className="h-4 w-4" />
    <span className="sr-only">{t("objectLifecycle.carousel.previous")}</span>
    <span className="sr-only">{t("trackingDetails.carousel.previous")}</span>
  </Button>
  );
});
@@ -243,13 +243,13 @@ const CarouselNext = React.forwardRef<
        : "-bottom-12 left-1/2 -translate-x-1/2 rotate-90",
      className,
    )}
    aria-label={t("objectLifecycle.carousel.next")}
    aria-label={t("trackingDetails.carousel.next")}
    disabled={!canScrollNext}
    onClick={scrollNext}
    {...props}
  >
    <ArrowRight className="h-4 w-4" />
    <span className="sr-only">{t("objectLifecycle.carousel.next")}</span>
    <span className="sr-only">{t("trackingDetails.carousel.next")}</span>
  </Button>
  );
});

@ -7,6 +7,7 @@ export interface DetailStreamContextType {
|
||||
currentTime: number;
|
||||
camera: string;
|
||||
annotationOffset: number; // milliseconds
|
||||
setSelectedObjectIds: React.Dispatch<React.SetStateAction<string[]>>;
|
||||
setAnnotationOffset: (ms: number) => void;
|
||||
toggleObjectSelection: (id: string | undefined) => void;
|
||||
isDetailMode: boolean;
|
||||
@ -69,6 +70,7 @@ export function DetailStreamProvider({
|
||||
camera,
|
||||
annotationOffset,
|
||||
setAnnotationOffset,
|
||||
setSelectedObjectIds,
|
||||
toggleObjectSelection,
|
||||
isDetailMode,
|
||||
};
|
||||
|
||||
@ -10,7 +10,7 @@ export enum LifecycleClassType {
|
||||
PATH_POINT = "path_point",
|
||||
}
|
||||
|
||||
export type ObjectLifecycleSequence = {
|
||||
export type TrackingDetailsSequence = {
|
||||
camera: string;
|
||||
timestamp: number;
|
||||
data: {
|
||||
@ -39,5 +39,5 @@ export type Position = {
|
||||
x: number;
|
||||
y: number;
|
||||
timestamp: number;
|
||||
lifecycle_item?: ObjectLifecycleSequence;
|
||||
lifecycle_item?: TrackingDetailsSequence;
|
||||
};
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
import { ObjectLifecycleSequence } from "@/types/timeline";
|
||||
import { TrackingDetailsSequence } from "@/types/timeline";
|
||||
import { t } from "i18next";
|
||||
import i18n, { getTranslatedLabel } from "./i18n";
|
||||
import { capitalizeFirstLetter } from "./stringUtil";
|
||||
|
||||
export function getLifecycleItemDescription(
|
||||
lifecycleItem: ObjectLifecycleSequence,
|
||||
lifecycleItem: TrackingDetailsSequence,
|
||||
) {
|
||||
const rawLabel = Array.isArray(lifecycleItem.data.sub_label)
|
||||
? lifecycleItem.data.sub_label[0]
|
||||
@ -27,12 +27,12 @@ export function getLifecycleItemDescription(
|
||||
|
||||
switch (lifecycleItem.class_type) {
|
||||
case "visible":
|
||||
return t("objectLifecycle.lifecycleItemDesc.visible", {
|
||||
return t("trackingDetails.lifecycleItemDesc.visible", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
case "entered_zone":
|
||||
return t("objectLifecycle.lifecycleItemDesc.entered_zone", {
|
||||
return t("trackingDetails.lifecycleItemDesc.entered_zone", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
zones:
|
||||
@ -49,12 +49,12 @@ export function getLifecycleItemDescription(
|
||||
).join(" and "),
|
||||
});
|
||||
case "active":
|
||||
return t("objectLifecycle.lifecycleItemDesc.active", {
|
||||
return t("trackingDetails.lifecycleItemDesc.active", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
case "stationary":
|
||||
return t("objectLifecycle.lifecycleItemDesc.stationary", {
|
||||
return t("trackingDetails.lifecycleItemDesc.stationary", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
@ -65,7 +65,7 @@ export function getLifecycleItemDescription(
|
||||
lifecycleItem.data.attribute == "license_plate"
|
||||
) {
|
||||
title = t(
|
||||
"objectLifecycle.lifecycleItemDesc.attribute.faceOrLicense_plate",
|
||||
"trackingDetails.lifecycleItemDesc.attribute.faceOrLicense_plate",
|
||||
{
|
||||
ns: "views/explore",
|
||||
label,
|
||||
@ -75,7 +75,7 @@ export function getLifecycleItemDescription(
|
||||
},
|
||||
);
|
||||
} else {
|
||||
title = t("objectLifecycle.lifecycleItemDesc.attribute.other", {
|
||||
title = t("trackingDetails.lifecycleItemDesc.attribute.other", {
|
||||
ns: "views/explore",
|
||||
label: lifecycleItem.data.label,
|
||||
attribute: getTranslatedLabel(
|
||||
@ -86,17 +86,17 @@ export function getLifecycleItemDescription(
|
||||
return title;
|
||||
}
|
||||
case "gone":
|
||||
return t("objectLifecycle.lifecycleItemDesc.gone", {
|
||||
return t("trackingDetails.lifecycleItemDesc.gone", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
case "heard":
|
||||
return t("objectLifecycle.lifecycleItemDesc.heard", {
|
||||
return t("trackingDetails.lifecycleItemDesc.heard", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
case "external":
|
||||
return t("objectLifecycle.lifecycleItemDesc.external", {
|
||||
return t("trackingDetails.lifecycleItemDesc.external", {
|
||||
ns: "views/explore",
|
||||
label,
|
||||
});
|
||||
|
||||
@ -11,7 +11,6 @@ import {
|
||||
FrigateConfig,
|
||||
} from "@/types/frigateConfig";
|
||||
import { useEffect, useMemo, useState } from "react";
|
||||
import { isMobile } from "react-device-detect";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { FaFolderPlus } from "react-icons/fa";
|
||||
import { MdModelTraining } from "react-icons/md";
|
||||
@ -131,7 +130,7 @@ export default function ModelSelectionView({
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex size-full gap-2 p-2">
|
||||
<div className="grid auto-rows-max grid-cols-2 gap-2 overflow-y-auto p-2 md:grid-cols-4 lg:grid-cols-5 xl:grid-cols-6 2xl:grid-cols-8 3xl:grid-cols-10">
|
||||
{selectedClassificationConfigs.length === 0 ? (
|
||||
<NoModelsView
|
||||
onCreateModel={() => setNewModel(true)}
|
||||
@ -208,14 +207,13 @@ function ModelCard({ config, onClick }: ModelCardProps) {
|
||||
<div
|
||||
key={config.name}
|
||||
className={cn(
|
||||
"relative size-60 cursor-pointer overflow-hidden rounded-lg",
|
||||
"relative aspect-square w-full cursor-pointer overflow-hidden rounded-lg",
|
||||
"outline-transparent duration-500",
|
||||
isMobile && "w-full",
|
||||
)}
|
||||
onClick={() => onClick()}
|
||||
>
|
||||
<img
|
||||
className={cn("size-full", isMobile && "w-full")}
|
||||
className="size-full"
|
||||
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
|
||||
/>
|
||||
<ImageShadowOverlay />
|
||||
|
||||
@ -202,6 +202,11 @@ export default function EventView({
|
||||
t("export.toast.success", { ns: "components/dialog" }),
|
||||
{
|
||||
position: "top-center",
|
||||
action: (
|
||||
<a href="/export" target="_blank" rel="noopener noreferrer">
|
||||
<Button>View</Button>
|
||||
</a>
|
||||
),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@ -232,8 +232,8 @@ function ExploreThumbnailImage({
|
||||
}
|
||||
};
|
||||
|
||||
const handleShowObjectLifecycle = () => {
|
||||
onSelectSearch(event, false, "object_lifecycle");
|
||||
const handleShowTrackingDetails = () => {
|
||||
onSelectSearch(event, false, "tracking_details");
|
||||
};
|
||||
|
||||
const handleShowSnapshot = () => {
|
||||
@ -251,7 +251,7 @@ function ExploreThumbnailImage({
|
||||
searchResult={event}
|
||||
findSimilar={handleFindSimilar}
|
||||
refreshResults={mutate}
|
||||
showObjectLifecycle={handleShowObjectLifecycle}
|
||||
showTrackingDetails={handleShowTrackingDetails}
|
||||
showSnapshot={handleShowSnapshot}
|
||||
addTrigger={handleAddTrigger}
|
||||
isContextMenu={true}
|
||||
|
||||
@ -644,8 +644,8 @@ export default function SearchView({
|
||||
}
|
||||
}}
|
||||
refreshResults={refresh}
|
||||
showObjectLifecycle={() =>
|
||||
onSelectSearch(value, false, "object_lifecycle")
|
||||
showTrackingDetails={() =>
|
||||
onSelectSearch(value, false, "tracking_details")
|
||||
}
|
||||
showSnapshot={() =>
|
||||
onSelectSearch(value, false, "snapshot")
|
||||
|
||||