Compare commits

..

9 Commits

Author SHA1 Message Date
Hosted Weblate
2a8eacedb8
Translated using Weblate (Norwegian Bokmål)
Currently translated at 100.0% (72 of 72 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (48 of 48 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (72 of 72 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (54 of 54 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (39 of 39 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Norwegian Bokmål)

Currently translated at 100.0% (90 of 90 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: OverTheHillsAndFarAway <prosjektx@users.noreply.hosted.weblate.org>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-filter/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-live/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/nb_NO/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/nb_NO/
Translation: Frigate NVR/components-dialog
Translation: Frigate NVR/components-filter
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-live
Translation: Frigate NVR/views-search
Translation: Frigate NVR/views-settings
2025-11-01 14:19:48 +00:00
Hosted Weblate
21da0c979f
Translated using Weblate (Chinese (Simplified Han script))
Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Chinese (Simplified Han script))

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Chinese (Simplified Han script))

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Chinese (Simplified Han script))

Currently translated at 100.0% (39 of 39 strings)

Co-authored-by: GuoQing Liu <842607283@qq.com>
Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/zh_Hans/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/zh_Hans/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/zh_Hans/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/zh_Hans/
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-settings
2025-11-01 14:19:47 +00:00
Hosted Weblate
d451cef1d5
Translated using Weblate (Slovak)
Currently translated at 85.9% (512 of 596 strings)

Translated using Weblate (Slovak)

Currently translated at 99.1% (123 of 124 strings)

Translated using Weblate (Slovak)

Currently translated at 100.0% (54 of 54 strings)

Translated using Weblate (Slovak)

Currently translated at 98.6% (494 of 501 strings)

Translated using Weblate (Slovak)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Slovak)

Currently translated at 74.1% (442 of 596 strings)

Translated using Weblate (Slovak)

Currently translated at 98.3% (122 of 124 strings)

Translated using Weblate (Slovak)

Currently translated at 100.0% (13 of 13 strings)

Translated using Weblate (Slovak)

Currently translated at 100.0% (39 of 39 strings)

Translated using Weblate (Slovak)

Currently translated at 98.1% (53 of 54 strings)

Translated using Weblate (Slovak)

Currently translated at 100.0% (206 of 206 strings)

Translated using Weblate (Slovak)

Currently translated at 88.0% (441 of 501 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: Jakub K <klacanjakub0@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/audio/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-exports/sk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/sk/
Translation: Frigate NVR/audio
Translation: Frigate NVR/common
Translation: Frigate NVR/components-dialog
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-exports
Translation: Frigate NVR/views-settings
2025-11-01 14:19:46 +00:00
Hosted Weblate
50f4f686a6
Translated using Weblate (French)
Currently translated at 100.0% (89 of 89 strings)

Translated using Weblate (French)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (French)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (French)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (French)

Currently translated at 100.0% (39 of 39 strings)

Co-authored-by: Apocoloquintose <bertrand.moreux@gmail.com>
Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/fr/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/fr/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/fr/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/fr/
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-settings
2025-11-01 14:19:45 +00:00
Hosted Weblate
5e23230df5
Translated using Weblate (Dutch)
Currently translated at 100.0% (89 of 89 strings)

Translated using Weblate (Dutch)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Dutch)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Dutch)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Dutch)

Currently translated at 100.0% (39 of 39 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: Marijn <168113859+Marijn0@users.noreply.github.com>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/nl/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/nl/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/nl/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/nl/
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-settings
2025-11-01 14:19:44 +00:00
Hosted Weblate
15522b916e
Translated using Weblate (Italian)
Currently translated at 100.0% (89 of 89 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (54 of 54 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (206 of 206 strings)

Translated using Weblate (Italian)

Currently translated at 28.4% (25 of 88 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (13 of 13 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (39 of 39 strings)

Translated using Weblate (Italian)

Currently translated at 100.0% (51 of 51 strings)

Translated using Weblate (Italian)

Currently translated at 75.0% (93 of 124 strings)

Translated using Weblate (Italian)

Currently translated at 98.1% (53 of 54 strings)

Co-authored-by: Gringo <ita.translations@tiscali.it>
Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-exports/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/it/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/it/
Translation: Frigate NVR/common
Translation: Frigate NVR/components-dialog
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-exports
Translation: Frigate NVR/views-facelibrary
Translation: Frigate NVR/views-settings
2025-11-01 14:19:42 +00:00
Hosted Weblate
516d84cc31
Translated using Weblate (Ukrainian)
Currently translated at 100.0% (89 of 89 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (596 of 596 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (51 of 51 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (13 of 13 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (39 of 39 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (54 of 54 strings)

Translated using Weblate (Ukrainian)

Currently translated at 100.0% (206 of 206 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: Максим Горпиніч <gorpinicmaksim0@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-exports/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/uk/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/uk/
Translation: Frigate NVR/common
Translation: Frigate NVR/components-dialog
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-exports
Translation: Frigate NVR/views-facelibrary
Translation: Frigate NVR/views-settings
2025-11-01 14:19:41 +00:00
Hosted Weblate
114def0617
Translated using Weblate (Romanian)
Currently translated at 100.0% (88 of 88 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (51 of 51 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (39 of 39 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (593 of 593 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (48 of 48 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (90 of 90 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (51 of 51 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (13 of 13 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (124 of 124 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (34 of 34 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (54 of 54 strings)

Translated using Weblate (Romanian)

Currently translated at 100.0% (206 of 206 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: lukasig <lukasig@hotmail.com>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/common/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/components-dialog/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-classificationmodel/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-explore/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-exports/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-facelibrary/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-live/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-search/ro/
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-settings/ro/
Translation: Frigate NVR/common
Translation: Frigate NVR/components-dialog
Translation: Frigate NVR/views-classificationmodel
Translation: Frigate NVR/views-events
Translation: Frigate NVR/views-explore
Translation: Frigate NVR/views-exports
Translation: Frigate NVR/views-facelibrary
Translation: Frigate NVR/views-live
Translation: Frigate NVR/views-search
Translation: Frigate NVR/views-settings
2025-11-01 14:19:40 +00:00
Hosted Weblate
71f0472ff5
Translated using Weblate (Portuguese (Brazil))
Currently translated at 100.0% (34 of 34 strings)

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Co-authored-by: Marcelo Popper Costa <marcelo_popper@hotmail.com>
Translate-URL: https://hosted.weblate.org/projects/frigate-nvr/views-events/pt_BR/
Translation: Frigate NVR/views-events
2025-11-01 14:19:39 +00:00
98 changed files with 535 additions and 1658 deletions

View File

@ -2,9 +2,9 @@
set -e
# Download the MxAccl for Frigate github release
wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
rm /tmp/mxaccl.zip
# Install Python dependencies

View File

@ -56,7 +56,7 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
rapidfuzz==3.12.*
Levenshtein==0.26.*
# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*

View File

@ -24,13 +24,10 @@ echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
# Update and install specific SDK 2.1 packages
echo "Installing MemryX SDK 2.1 packages..."
# Update and install memx-drivers
echo "Installing memx-drivers..."
sudo apt update
sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
# Hold packages to prevent automatic upgrades
sudo apt-mark hold memx-drivers memx-accl mxa-manager
sudo apt install -y memx-drivers
# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@ -40,5 +37,11 @@ fi
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
echo "MemryX SDK 2.1 installation complete!"
# Install other runtime packages
packages=("memx-accl" "mxa-manager")
for pkg in "${packages[@]}"; do
echo "Installing $pkg..."
sudo apt install -y "$pkg"
done
echo "MemryX installation complete!"

View File

@ -1,2 +1 @@
cuda-python == 12.6.*; platform_machine == 'aarch64'
numpy == 1.26.*; platform_machine == 'aarch64'

View File

@ -37,6 +37,7 @@ from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
clean_camera_user_pass,
flatten_config_data,
get_tz_modifiers,
process_config_query_string,
update_yaml_file_bulk,
)
@ -47,7 +48,6 @@ from frigate.util.services import (
restart_frigate,
vainfo_hwaccel,
)
from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION
logger = logging.getLogger(__name__)
@ -403,13 +403,12 @@ def config_set(request: Request, body: AppConfigSetBody):
settings,
)
else:
# Generic handling for global config updates
# Handle nested config updates (e.g., config/classification/custom/{name})
settings = config.get_nested_object(body.update_topic)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
if settings:
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
return JSONResponse(
content=(

View File

@ -31,7 +31,7 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.const import CLIPS_DIR, FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.classification import (
@ -828,13 +828,9 @@ def delete_classification_model(request: Request, name: str):
status_code=404,
)
# Delete the classification model's data directory in clips
data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
if os.path.exists(data_dir):
shutil.rmtree(data_dir)
# Delete the classification model's data directory
model_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
# Delete the classification model's files in model_cache
model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
if os.path.exists(model_dir):
shutil.rmtree(model_dir)

View File

@ -2,7 +2,6 @@
import base64
import datetime
import json
import logging
import os
import random
@ -58,8 +57,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions, get_tz_modifiers
logger = logging.getLogger(__name__)
@ -814,6 +813,7 @@ def events_summary(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
tz_name = params.timezone
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
has_clip = params.has_clip
has_snapshot = params.has_snapshot
@ -828,91 +828,33 @@ def events_summary(
if len(clauses) == 0:
clauses.append((True))
time_range_query = (
groups = (
Event.select(
fn.MIN(Event.start_time).alias("min_time"),
fn.MAX(Event.start_time).alias("max_time"),
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
.dicts()
.get()
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
Event.zones,
)
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content=[])
dst_periods = get_dst_transitions(tz_name, min_time, max_time)
grouped: dict[tuple, dict] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_groups = (
Event.select(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(
reduce(operator.and_, clauses)
& (Event.camera << allowed_cameras)
& (Event.start_time >= period_start)
& (Event.start_time <= period_end)
)
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + period_offset).cast("int") / (3600 * 24),
Event.zones,
)
.namedtuples()
)
for g in period_groups:
key = (
g.camera,
g.label,
g.sub_label,
json.dumps(g.data, sort_keys=True) if g.data is not None else None,
g.day,
json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
)
if key in grouped:
grouped[key]["count"] += int(g.count or 0)
else:
grouped[key] = {
"camera": g.camera,
"label": g.label,
"sub_label": g.sub_label,
"data": g.data,
"day": g.day,
"zones": g.zones,
"count": int(g.count or 0),
}
return JSONResponse(content=list(grouped.values()))
return JSONResponse(content=[e for e in groups.dicts()])
@router.get(

View File

@ -34,7 +34,7 @@ from frigate.record.export import (
PlaybackSourceEnum,
RecordingExporter,
)
from frigate.util.time import is_current_hour
from frigate.util.builtin import is_current_hour
logger = logging.getLogger(__name__)

View File

@ -44,9 +44,9 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
@ -424,6 +424,7 @@ def all_recordings_summary(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
cameras = params.cameras
if cameras != "all":
@ -431,70 +432,41 @@ def all_recordings_summary(
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
cameras = ",".join(filtered)
else:
camera_list = allowed_cameras
cameras = allowed_cameras
time_range_query = (
query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day")
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if params.cameras != "all":
query = query.where(Recordings.camera << cameras.split(","))
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
recording_days = query.namedtuples()
days = {day.day: True for day in recording_days}
return JSONResponse(content=days)
@ -504,103 +476,61 @@ def all_recordings_summary(
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
recording_groups = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.group_by((Event.start_time + seconds_offset).cast("int") / 3600)
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
event_map = {g.hour: g.count for g in event_groups}
days = {}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day not in days:
days[day] = {"events": events_count, "hours": [hour_data], "day": day}
else:
days[day]["events"] += events_count
days[day]["hours"].append(hour_data)
return JSONResponse(content=list(days.values()))

View File

@ -36,7 +36,7 @@ from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
from frigate.util.time import get_dst_transitions
from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__)
@ -197,6 +197,7 @@ async def review_summary(
user_id = current_user["username"]
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
cameras = params.cameras
@ -328,135 +329,89 @@ async def review_summary(
)
clauses.append(reduce(operator.or_, label_clauses))
# Find the time range of available data
time_range_query = (
day_in_seconds = 60 * 60 * 24
last_month_query = (
ReviewSegment.select(
fn.MIN(ReviewSegment.start_time).alias("min_time"),
fn.MAX(ReviewSegment.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, clauses) if clauses else True)
.dicts()
.get()
.group_by(
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
data = {
"last24Hours": last_24_query,
}
# If no data, return early
if min_time is None or max_time is None:
return JSONResponse(content=data)
# Get DST transition periods
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
day_in_seconds = 60 * 60 * 24
# Query each DST period separately with the correct offset
for period_start, period_end, period_offset in dst_periods:
# Calculate hour/minute modifiers for this period
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
# Build clauses including time range for this period
period_clauses = clauses.copy()
period_clauses.append(
(ReviewSegment.start_time >= period_start)
& (ReviewSegment.start_time <= period_end)
)
period_query = (
ReviewSegment.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, period_clauses))
.group_by(
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
# Merge results from this period
for e in period_query.dicts().iterator():
day_key = e["day"]
if day_key in data:
# Merge counts if day already exists (edge case at DST boundary)
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
data[day_key]["total_alert"] += e["total_alert"] or 0
data[day_key]["total_detection"] += e["total_detection"] or 0
else:
data[day_key] = e
for e in last_month_query.dicts().iterator():
data[e["day"]] = e
return JSONResponse(content=data)

View File

@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple
import cv2
import numpy as np
from Levenshtein import distance, jaro_winkler
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from rapidfuzz.distance import JaroWinkler, Levenshtein
from shapely.geometry import Polygon
from frigate.comms.event_metadata_updater import (
@ -1123,9 +1123,7 @@ class LicensePlateProcessingMixin:
for i, plate in enumerate(plates):
merged = False
for j, cluster in enumerate(clusters):
sims = [
JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
]
sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
if len(sims) > 0:
avg_sim = sum(sims) / len(sims)
if avg_sim >= self.cluster_threshold:
@ -1502,7 +1500,7 @@ class LicensePlateProcessingMixin:
and current_time - data["last_seen"]
<= self.config.cameras[camera].lpr.expire_time
):
similarity = JaroWinkler.similarity(data["plate"], top_plate)
similarity = jaro_winkler(data["plate"], top_plate)
if similarity >= self.similarity_threshold:
plate_id = existing_id
logger.debug(
@ -1582,8 +1580,7 @@ class LicensePlateProcessingMixin:
for label, plates_list in self.lpr_config.known_plates.items()
if any(
re.match(f"^{plate}$", rep_plate)
or Levenshtein.distance(plate, rep_plate)
<= self.lpr_config.match_distance
or distance(plate, rep_plate) <= self.lpr_config.match_distance
for plate in plates_list
)
),

View File

@ -166,7 +166,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
camera = obj_data["camera"]
if not self.config.cameras[camera].face_recognition.enabled:
logger.debug(f"Face recognition disabled for camera {camera}, skipping")
return
start = datetime.datetime.now().timestamp()
@ -209,7 +208,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
person_box = obj_data.get("box")
if not person_box:
logger.debug(f"No person box available for {id}")
return
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@ -235,8 +233,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
try:
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
except Exception as e:
logger.debug(f"Failed to convert face frame color for {id}: {e}")
except Exception:
return
else:
# don't run for object without attributes
@ -254,7 +251,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
# no faces detected in this frame
if not face:
logger.debug(f"No face attributes found for {id}")
return
face_box = face.get("box")
@ -278,7 +274,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
res = self.recognizer.classify(face_frame)
if not res:
logger.debug(f"Face recognizer returned no result for {id}")
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
@ -335,7 +330,6 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
self.recognizer.clear()
return {"success": True, "message": "Face classifier cleared."}
elif topic == EmbeddingsRequestEnum.recognize_face.value:
img = cv2.imdecode(
np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),

View File

@ -158,13 +158,11 @@ class EmbeddingMaintainer(threading.Thread):
self.realtime_processors: list[RealTimeProcessorApi] = []
if self.config.face_recognition.enabled:
logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
self.realtime_processors.append(
FaceRealTimeProcessor(
self.config, self.requestor, self.event_metadata_publisher, metrics
)
)
logger.debug("FaceRealTimeProcessor initialized successfully")
if self.config.classification.bird.enabled:
self.realtime_processors.append(
@ -285,66 +283,45 @@ class EmbeddingMaintainer(threading.Thread):
logger.info("Exiting embeddings maintenance...")
def _check_classification_config_updates(self) -> None:
"""Check for classification config updates and add/remove processors."""
"""Check for classification config updates and add new processors."""
topic, model_config = self.classification_config_subscriber.check_for_update()
if topic:
if topic and model_config:
model_name = topic.split("/")[-1]
self.config.classification.custom[model_name] = model_config
if model_config is None:
self.realtime_processors = [
processor
for processor in self.realtime_processors
if not (
isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
)
and processor.model_config.name == model_name
)
]
return
logger.info(
f"Successfully removed classification processor for model: {model_name}"
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
)
else:
self.config.classification.custom[model_name] = model_config
# Check if processor already exists
for processor in self.realtime_processors:
if isinstance(
processor,
(
CustomStateClassificationProcessor,
CustomObjectClassificationProcessor,
),
):
if processor.model_config.name == model_name:
logger.debug(
f"Classification processor for model {model_name} already exists, skipping"
)
return
if model_config.state_config is not None:
processor = CustomStateClassificationProcessor(
self.config, model_config, self.requestor, self.metrics
)
else:
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.metrics,
)
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
processor = CustomObjectClassificationProcessor(
self.config,
model_config,
self.event_metadata_publisher,
self.metrics,
)
self.realtime_processors.append(processor)
logger.info(
f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
)
def _process_requests(self) -> None:
"""Process embeddings requests"""
@ -397,14 +374,7 @@ class EmbeddingMaintainer(threading.Thread):
source_type, _, camera, frame_name, data = update
logger.debug(
f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
)
if not camera or source_type != EventTypeEnum.tracked_object:
logger.debug(
f"Skipping update - camera: {camera}, source_type: {source_type}"
)
return
if self.config.semantic_search.enabled:
@ -414,9 +384,6 @@ class EmbeddingMaintainer(threading.Thread):
# no need to process updated objects if no processors are active
if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
logger.debug(
f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
)
return
# Create our own thumbnail based on the bounding box and the frame time
@ -425,7 +392,6 @@ class EmbeddingMaintainer(threading.Thread):
frame_name, camera_config.frame_shape_yuv
)
except FileNotFoundError:
logger.debug(f"Frame {frame_name} not found for camera {camera}")
pass
if yuv_frame is None:
@ -434,11 +400,7 @@ class EmbeddingMaintainer(threading.Thread):
)
return
logger.debug(
f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
)
for processor in self.realtime_processors:
logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
processor.process_frame(data, yuv_frame)
for processor in self.post_processors:

View File

@ -9,7 +9,6 @@ from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
import numpy as np
import zmq
from frigate.comms.object_detector_signaler import (
ObjectDetectorPublisher,
@ -378,15 +377,6 @@ class RemoteObjectDetector:
if self.stop_event.is_set():
return detections
# Drain any stale detection results from the ZMQ buffer before making a new request
# This prevents reading detection results from a previous request
# NOTE: This should never happen, but can in some rare cases
while True:
try:
self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
except zmq.Again:
break
# copy input to shared memory
self.np_shm[:] = tensor_input[:]
self.detection_queue.put(self.name)

View File

@ -14,8 +14,7 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink
from frigate.util.time import get_tomorrow_at_time
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
logger = logging.getLogger(__name__)

View File

@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Export, Previews, Recordings
from frigate.util.time import is_current_hour
from frigate.util.builtin import is_current_hour
logger = logging.getLogger(__name__)

View File

@ -15,9 +15,12 @@ from collections.abc import Mapping
from multiprocessing.sharedctypes import Synchronized
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from zoneinfo import ZoneInfoNotFoundError
import numpy as np
import pytz
from ruamel.yaml import YAML
from tzlocal import get_localzone
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
@ -154,6 +157,17 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    """Build SQLite ``datetime()`` modifier strings for a timezone's current UTC offset.

    Args:
        tz_name: IANA timezone name (e.g. ``"America/New_York"``).

    Returns:
        Tuple of (``"<H> hour"``, ``"<M> minute"``, offset_in_seconds).
    """
    # Current UTC offset of the requested zone, in seconds.
    offset_seconds = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    # int() truncates toward zero so the hour and minute parts keep the
    # same sign for zones west of UTC (e.g. -3 hour / -30 minute).
    whole_hours = int(offset_seconds / 60 / 60)
    leftover_minutes = int(offset_seconds / 60 - whole_hours * 60)
    return f"{whole_hours} hour", f"{leftover_minutes} minute", offset_seconds
def to_relative_box(
width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int | float, int | float, int | float, int | float]:
@ -284,6 +298,34 @@ def find_by_key(dictionary, target_key):
return None
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Return tomorrow's datetime at the given local hour, converted to UTC.

    Falls back to UTC for computing "tomorrow" when the system timezone
    cannot be resolved (missing or invalid TZ configuration).

    Args:
        hour: Hour of day (0-23) in the local timezone.

    Returns:
        Timezone-aware UTC datetime for tomorrow at ``hour``:00:00.
    """
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    # Zero microseconds as well (the previous implementation left them set,
    # producing a sub-second offset in the scheduled time).
    return tomorrow.replace(hour=hour, minute=0, second=0, microsecond=0).astimezone(
        datetime.timezone.utc
    )
def is_current_hour(timestamp: int) -> bool:
    """Return True if timestamp falls before the end of the current UTC hour.

    NOTE: timestamps in the past also return True; only timestamps at or
    beyond the start of the next UTC hour return False.
    """
    top_of_hour = datetime.datetime.now(datetime.timezone.utc).replace(
        minute=0, second=0, microsecond=0
    )
    next_hour_start = (top_of_hour + datetime.timedelta(hours=1)).timestamp()
    return timestamp < next_hour_start
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
"""clear file then unlink to avoid space retained by file descriptors."""
if not missing_ok and not file.exists():

View File

@ -1,100 +0,0 @@
"""Time utilities."""
import datetime
import logging
from typing import Tuple
from zoneinfo import ZoneInfoNotFoundError
import pytz
from tzlocal import get_localzone
logger = logging.getLogger(__name__)
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    """Build SQLite datetime() modifier strings for a timezone's current UTC offset.

    Returns ("<hours> hour", "<minutes> minute", seconds_offset), suitable for
    passing as modifiers to SQLite's datetime(..., modifier) function.
    """
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    # int() truncates toward zero, so both parts keep the offset's sign.
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier, seconds_offset
def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Return tomorrow's datetime at the given local hour, converted to UTC.

    Falls back to UTC for computing "tomorrow" when the local timezone cannot
    be resolved. Note: minute and second are zeroed but microseconds are
    left as-is.
    """
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
        datetime.timezone.utc
    )
def is_current_hour(timestamp: int) -> bool:
    """Return True if timestamp is before the end of the current UTC hour.

    NOTE: any past timestamp also returns True; only timestamps at or after
    the start of the next UTC hour return False.
    """
    start_of_next_hour = (
        datetime.datetime.now(datetime.timezone.utc).replace(
            minute=0, second=0, microsecond=0
        )
        + datetime.timedelta(hours=1)
    ).timestamp()
    return timestamp < start_of_next_hour
def get_dst_transitions(
    tz_name: str, start_time: float, end_time: float
) -> list[tuple[float, float, float]]:
    """
    Find DST transition points and return time periods with consistent offsets.

    Args:
        tz_name: Timezone name (e.g., 'America/New_York')
        start_time: Start timestamp (UTC)
        end_time: End timestamp (UTC)

    Returns:
        List of (period_start, period_end, seconds_offset) tuples representing
        continuous periods with the same UTC offset. (The annotation previously
        claimed 2-tuples; the implementation has always produced 3-tuples.)
    """
    try:
        tz = pytz.timezone(tz_name)
    except pytz.UnknownTimeZoneError:
        # If timezone is invalid, return single period with no offset
        return [(start_time, end_time, 0)]

    def _offset_at(ts: float) -> float:
        # UTC offset (seconds) the zone applies at this UTC timestamp.
        # fromtimestamp(tz=utc) replaces the deprecated utcfromtimestamp().
        dt = datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)
        return dt.astimezone(tz).utcoffset().total_seconds()

    periods = []
    prev_offset = _offset_at(start_time)
    period_start = start_time

    # Check each day for offset changes; real DST transitions are spaced
    # far more than a day apart, so daily sampling cannot miss one.
    current = start_time
    while current <= end_time:
        current_offset = _offset_at(current)
        if current_offset != prev_offset:
            # Found a transition - close previous period
            periods.append((period_start, current, prev_offset))
            period_start = current
            prev_offset = current_offset
        current += 86400  # Check daily

    # Add final period
    periods.append((period_start, end_time, prev_offset))
    return periods

View File

@ -34,7 +34,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.track.tracked_object import TrackedObjectAttribute
from frigate.util.builtin import EventsPerSecond
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
from frigate.util.image import (
FrameManager,
SharedMemoryFrameManager,
@ -53,7 +53,6 @@ from frigate.util.object import (
reduce_detections,
)
from frigate.util.process import FrigateProcess
from frigate.util.time import get_tomorrow_at_time
logger = logging.getLogger(__name__)

View File

@ -218,10 +218,7 @@
}
},
"label": {
"back": "Torna enrere",
"hide": "Oculta {{item}}",
"show": "Mostra {{item}}",
"ID": "ID"
"back": "Torna enrere"
},
"button": {
"apply": "Aplicar",
@ -284,14 +281,5 @@
"readTheDocumentation": "Llegir la documentació",
"information": {
"pixels": "{{area}}px"
},
"list": {
"two": "{{0}} i {{1}}",
"many": "{{items}}, i {{last}}",
"separatorWithSpace": ",· "
},
"field": {
"optional": "Opcional",
"internalID": "L'ID intern que Frigate s'utilitza a la configuració i a la base de dades"
}
}

View File

@ -117,7 +117,6 @@
"search": {
"placeholder": "Cerca per etiqueta o subetiqueta..."
},
"noImages": "No s'han trobat miniatures per a aquesta càmera",
"unknownLabel": "Imatge activadora desada"
"noImages": "No s'han trobat miniatures per a aquesta càmera"
}
}

View File

@ -5,9 +5,7 @@
"renameCategory": "Reanomena la classe",
"deleteCategory": "Suprimeix la classe",
"deleteImages": "Suprimeix les imatges",
"trainModel": "Model de tren",
"addClassification": "Afegeix una classificació",
"deleteModels": "Suprimeix els models"
"trainModel": "Model de tren"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Imatges suprimides",
"categorizedImage": "Imatge classificada amb èxit",
"trainedModel": "Model entrenat amb èxit.",
"trainingModel": "S'ha iniciat amb èxit la formació de models.",
"deletedModel": "S'han suprimit correctament {{count}} models"
"trainingModel": "S'ha iniciat amb èxit la formació de models."
},
"error": {
"deleteImageFailed": "No s'ha pogut suprimir: {{errorMessage}}",
"deleteCategoryFailed": "No s'ha pogut suprimir la classe: {{errorMessage}}",
"categorizeFailed": "No s'ha pogut categoritzar la imatge: {{errorMessage}}",
"trainingFailed": "No s'ha pogut iniciar l'entrenament del model: {{errorMessage}}",
"deleteModelFailed": "No s'ha pogut suprimir el model: {{errorMessage}}"
"trainingFailed": "No s'ha pogut iniciar l'entrenament del model: {{errorMessage}}"
}
},
"deleteCategory": {
@ -47,8 +43,7 @@
},
"train": {
"title": "Classificacions recents",
"aria": "Selecciona les classificacions recents",
"titleShort": "Recent"
"aria": "Selecciona les classificacions recents"
},
"categories": "Classes",
"createCategory": {
@ -103,8 +98,7 @@
"stateRequiresTwoClasses": "Els models d'estat requereixen almenys 2 classes",
"objectLabelRequired": "Seleccioneu una etiqueta d'objecte",
"objectTypeRequired": "Seleccioneu un tipus de classificació"
},
"states": "Estats"
}
},
"step2": {
"description": "Seleccioneu les càmeres i definiu l'àrea a monitoritzar per a cada càmera. El model classificarà l'estat d'aquestes àrees.",
@ -137,14 +131,5 @@
},
"generateSuccess": "Imatges de mostra generades amb èxit"
}
},
"deleteModel": {
"title": "Suprimeix el model de classificació",
"single": "Esteu segur que voleu suprimir {{name}}? Això suprimirà permanentment totes les dades associades, incloses les imatges i les dades d'entrenament. Aquesta acció no es pot desfer.",
"desc": "Esteu segur que voleu suprimir {{count}} model(s)? Això suprimirà permanentment totes les dades associades, incloses les imatges i les dades d'entrenament. Aquesta acció no es pot desfer."
},
"menu": {
"objects": "Objectes",
"states": "Estats"
}
}

View File

@ -43,17 +43,10 @@
"aria": "Canvia la vista de detall",
"trackedObject_other": "objectes",
"noObjectDetailData": "No hi ha dades de detall d'objecte disponibles.",
"label": "Detall",
"settings": "Configuració de la vista detallada",
"alwaysExpandActive": {
"title": "Expandeix sempre actiu",
"desc": "Expandeix sempre els detalls de l'objecte de la revisió activa quan estigui disponible."
}
"label": "Detall"
},
"objectTrack": {
"clickToSeek": "Feu clic per cercar aquesta hora",
"trackedPoint": "Punt de seguiment"
},
"zoomIn": "Amplia",
"zoomOut": "Redueix"
}
}

View File

@ -84,8 +84,7 @@
"details": "detalls",
"snapshot": "instantània",
"video": "vídeo",
"object_lifecycle": "cicle de vida de l'objecte",
"thumbnail": "miniatura"
"object_lifecycle": "cicle de vida de l'objecte"
},
"details": {
"timestamp": "Marca temporal",
@ -241,7 +240,7 @@
"noImageFound": "No s'ha trobat cap imatge amb aquesta hora.",
"createObjectMask": "Crear màscara d'objecte",
"adjustAnnotationSettings": "Ajustar configuració d'anotacions",
"scrollViewTips": "Feu clic per veure els moments significatius del cicle de vida d'aquest objecte.",
"scrollViewTips": "Desplaça per veure els moments significants del cicle de vida d'aquest objecte.",
"autoTrackingTips": "Limitar les posicións de la caixa serà inacurat per càmeras de seguiment automàtic.",
"count": "{{first}} de {{second}}",
"trackedPoint": "Punt Seguit",
@ -271,7 +270,7 @@
},
"offset": {
"label": "Òfset d'Anotació",
"desc": "Aquestes dades provenen del flux de detecció de la càmera, però se superposen a les imatges del flux de gravació. És poc probable que els dos fluxos estiguin perfectament sincronitzats. Com a resultat, el quadre delimitador i les imatges no s'alinearan perfectament. Tanmateix, es pot utilitzar el camp <code>annotation_offset</code> per ajustar-ho.",
"desc": "Aquestes dades provenen del flux de detecció de la càmera, però se superposen a les imatges del flux de gravació. És poc probable que els dos fluxos estiguin perfectament sincronitzats. Com a resultat, el quadre delimitador i el metratge no s'alinearan perfectament. Tanmateix, es pot utilitzar el camp <code>annotation_offset</code> per ajustar-ho.",
"millisecondsToOffset": "Millisegons per l'òfset de detecció d'anotacions per. <em>Per defecte: 0</em>",
"tips": "CONSELL: Imagineu-vos que hi ha un clip d'esdeveniment amb una persona caminant d'esquerra a dreta. Si el quadre delimitador de la cronologia de l'esdeveniment està constantment a l'esquerra de la persona, aleshores s'hauria de disminuir el valor. De la mateixa manera, si una persona camina d'esquerra a dreta i el quadre delimitador està constantment per davant de la persona, aleshores s'hauria d'augmentar el valor.",
"toast": {

View File

@ -13,11 +13,5 @@
"error": {
"renameExportFailed": "Error al canviar el nom de l'exportació: {{errorMessage}}"
}
},
"tooltip": {
"shareExport": "Comparteix l'exportació",
"downloadVideo": "Baixa el vídeo",
"editName": "Edita el nom",
"deleteExport": "Suprimeix l'exportació"
}
}

View File

@ -55,12 +55,12 @@
"searchFor": "Buscar {{inputValue}}",
"button": {
"clear": "Netejar cerca",
"save": "Desa la cerca",
"delete": "Elimina la recerca desada",
"filterInformation": "Informació del filtre",
"save": "Desar la cerca",
"delete": "Suprimeix la recerca desada",
"filterInformation": "Informació de filtre",
"filterActive": "Filtres actius"
},
"trackedObjectId": "ID de l'objecte rastrejat",
"trackedObjectId": "ID d'objecte rastrejat",
"placeholder": {
"search": "Cercar…"
},

View File

@ -49,10 +49,6 @@
"playAlertVideos": {
"label": "Reproduir vídeos d'alerta",
"desc": "Per defecte, les alertes recents al tauler en directe es reprodueixen com a vídeos petits en bucle. Desactiva aquesta opció per mostrar només una imatge estàtica de les alertes recents en aquest dispositiu/navegador."
},
"displayCameraNames": {
"label": "Mostra sempre els noms de la càmera",
"desc": "Mostra sempre els noms de les càmeres en un xip al tauler de visualització en directe multicàmera."
}
},
"storedLayouts": {
@ -699,9 +695,7 @@
},
"actions": {
"alert": "Marcar com Alerta",
"notification": "Enviar Notificació",
"sub_label": "Afegeix una subetiqueta",
"attribute": "Afegeix un atribut"
"notification": "Enviar Notificació"
},
"dialog": {
"createTrigger": {
@ -719,28 +713,25 @@
"form": {
"name": {
"title": "Nom",
"placeholder": "Anomena aquest activador",
"placeholder": "Entrar el nom del disparador",
"error": {
"minLength": "El camp ha de tenir almenys 2 caràcters.",
"invalidCharacters": "El camp només pot contenir lletres, números, guions baixos i guions.",
"minLength": "El nom ha de tenir almenys 2 caràcters de llargada.",
"invalidCharacters": "El nom només pot contenir lletres, números, guions i guions baixos.",
"alreadyExists": "El disparador amb aquest nom ja existeix per aquesta càmera."
},
"description": "Introduïu un nom o una descripció únics per a identificar aquest activador"
}
},
"enabled": {
"description": "Activar o desactivar aquest disparador"
},
"type": {
"title": "Tipus",
"placeholder": "Selecciona un tipus de disparador",
"description": "Activa quan es detecta una descripció similar d'un objecte rastrejat",
"thumbnail": "Activa quan es detecti una miniatura d'objecte rastrejada similar"
"placeholder": "Selecciona un tipus de disparador"
},
"content": {
"title": "Contingut",
"imagePlaceholder": "Selecciona una miniatura",
"imagePlaceholder": "Selecciona una imatge",
"textPlaceholder": "Entra el contingut de text",
"imageDesc": "Només es mostren les 100 miniatures més recents. Si no podeu trobar la miniatura desitjada, reviseu els objectes anteriors a Explora i configureu un activador des del menú.",
"imageDesc": "Selecciona una imatge per disparar aquesta acció quan una imatge similar sigui detectada.",
"textDesc": "Entra el text per disparar aquesta acció quan es detecti una descripció d'objecte a rastrejar similar.",
"error": {
"required": "Contigunt requerit."
@ -751,12 +742,11 @@
"error": {
"min": "El llindar ha de ser mínim 0",
"max": "El llindar ha de ser màxim 1"
},
"desc": "Estableix el llindar de similitud per a aquest activador. Un llindar més alt significa que es requereix una coincidència més propera per disparar el disparador."
}
},
"actions": {
"title": "Accions",
"desc": "Per defecte, Frigate dispara un missatge MQTT per a tots els activadors. Subetiquetes afegeix el nom de l'activador a l'etiqueta de l'objecte. Els atributs són metadades cercables emmagatzemades per separat a les metadades de l'objecte rastrejat.",
"desc": "Per defecte, Frigate dispara un missatge MQTT per tots els disparadors. Tria una acció adicional per realitzar quan aquest disparador dispari.",
"error": {
"min": "S'ha de seleccionar una acció com a mínim."
}
@ -782,30 +772,13 @@
},
"documentTitle": "Disparadors",
"management": {
"title": "Activadors",
"title": "Gestió de disparadors",
"desc": "Gestionar els disparadors de {{camera}}. Usa les tipus de miniatures per disparar miniatures similars a l'objecte a seguir seleccionat, i el tipus de descripció per disparar en cas de descripcions similars a l'especificada."
},
"addTrigger": "Afegir disaprador",
"semanticSearch": {
"desc": "La cerca semàntica ha d'estar activada per a utilitzar els activadors.",
"title": "La cerca semàntica està desactivada"
},
"wizard": {
"title": "Crea un activador",
"step1": {
"description": "Configura la configuració bàsica per al vostre activador."
},
"step2": {
"description": "Configura el contingut que activarà aquesta acció."
},
"step3": {
"description": "Configura el llindar i les accions d'aquest activador."
},
"steps": {
"nameAndType": "Nom i tipus",
"configureData": "Configura les dades",
"thresholdAndActions": "Llindar i accions"
}
}
},
"roles": {
@ -1012,9 +985,7 @@
"estimatedBandwidth": "Amplada de banda estimada",
"roles": "Rols",
"streamValidated": "El flux {{number}} s'ha validat correctament",
"streamValidationFailed": "Ha fallat la validació del flux {{number}}",
"ffmpegModule": "Usa el mode de compatibilitat del flux",
"ffmpegModuleDescription": "Si el flux no es carrega després de diversos intents, proveu d'activar-ho. Quan està activat, Frigate utilitzarà el mòdul ffmpeg amb go2rtc. Això pot proporcionar una millor compatibilitat amb alguns fluxos de càmera."
"streamValidationFailed": "Ha fallat la validació del flux {{number}}"
}
},
"cameraManagement": {

View File

@ -10,7 +10,6 @@
"unknownError": "Neznámá chyba. Zkontrolujte logy.",
"webUnknownError": "Neznámá chyba. Zkontrolujte logy konzoly.",
"rateLimit": "Limit požadavků překročen. Zkuste to znovu později."
},
"firstTimeLogin": "Přihlašujete se poprvé? Přihlašovací údaje jsou vypsány v logu Frigate."
}
}
}

View File

@ -1,7 +1 @@
{
"documentTitle": "Klasifikační modely",
"button": {
"deleteClassificationAttempts": "Odstranit Klasifikační obrazy",
"renameCategory": "Přejmenovat třídu"
}
}
{}

View File

@ -41,7 +41,7 @@
"aria": "Vybrat trénink"
},
"description": {
"addFace": "Přidejte novou kolekci do Knihovny obličejů nahráním prvního obrázku.",
"addFace": "Prúvodce přidání nové kolekce do Knižnice obličejů.",
"placeholder": "Zadejte název pro tuto kolekci",
"invalidName": "Neplatný název. Názvy mohou obsahovat pouze písmena, čísla, mezery, apostrofy, podtržítka a pomlčky."
},

View File

@ -11,8 +11,7 @@
"general": "Obecné nastavení - Frigate",
"frigatePlus": "Frigate+ nastavení - Frigate",
"enrichments": "Nastavení obohacení - Frigate",
"cameraManagement": "Správa kamer - Frigate",
"cameraReview": "Nastavení kontroly kamery - Frigate"
"cameraManagement": "Správa kamer - Frigate"
},
"frigatePlus": {
"toast": {

View File

@ -100,8 +100,7 @@
},
"list": {
"two": "{{0}} and {{1}}",
"many": "{{items}}, and {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, and {{last}}"
},
"field": {
"optional": "Optional",

View File

@ -271,8 +271,6 @@
"disconnectStream": "Disconnect",
"estimatedBandwidth": "Estimated Bandwidth",
"roles": "Roles",
"ffmpegModule": "Use stream compatibility mode",
"ffmpegModuleDescription": "If the stream does not load after several attempts, try enabling this. When enabled, Frigate will use the ffmpeg module with go2rtc. This may provide better compatibility with some camera streams.",
"none": "None",
"error": "Error",
"streamValidated": "Stream {{number}} validated successfully",

View File

@ -301,7 +301,6 @@
},
"list": {
"two": "{{0}} et {{1}}",
"many": "{{items}}, et {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, et {{last}}"
}
}

View File

@ -5,9 +5,7 @@
"renameCategory": "Renommer la classe",
"deleteCategory": "Supprimer la classe",
"deleteImages": "Supprimer les images",
"trainModel": "Entraîner le modèle",
"addClassification": "Ajouter une classification",
"deleteModels": "Supprimer les modèles"
"trainModel": "Entraîner le modèle"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Images supprimées",
"categorizedImage": "Image classifiée avec succès",
"trainedModel": "Modèle entraîné avec succès.",
"trainingModel": "L'entraînement du modèle a démarré avec succès.",
"deletedModel": "{{count}} modèle(s) supprimé(s) avec succès"
"trainingModel": "L'entraînement du modèle a démarré avec succès."
},
"error": {
"deleteImageFailed": "Échec de la suppression : {{errorMessage}}",
"deleteCategoryFailed": "Échec de la suppression de la classe : {{errorMessage}}",
"categorizeFailed": "Échec de la catégorisation de l'image : {{errorMessage}}",
"trainingFailed": "Échec du démarrage de l'entraînement du modèle : {{errorMessage}}",
"deleteModelFailed": "Impossible de supprimer le modèle : {{errorMessage}}"
"trainingFailed": "Échec du démarrage de l'entraînement du modèle : {{errorMessage}}"
}
},
"deleteCategory": {
@ -137,14 +133,5 @@
},
"generateSuccess": "Génération des images d'exemple réussie"
}
},
"deleteModel": {
"title": "Supprimer le modèle de classification",
"single": "Voulez-vous vraiment supprimer {{name}}? Cela supprimera définitivement toutes les données associées, y compris les images et les données d'entraînement. Cette action est irréversible.",
"desc": "Voulez-vous vraiment supprimer {{count}} modèle(s)? Cela supprimera définitivement toutes les données associées, y compris les images et les données d'entraînement. Cette action est irréversible."
},
"menu": {
"objects": "Objets",
"states": "États"
}
}

View File

@ -109,8 +109,7 @@
"details": "détails",
"video": "vidéo",
"object_lifecycle": "cycle de vie de l'objet",
"snapshot": "instantané",
"thumbnail": "Miniature"
"snapshot": "instantané"
},
"objectLifecycle": {
"title": "Cycle de vie de l'objet",
@ -241,7 +240,7 @@
"noImageFound": "Aucune image trouvée pour cet horodatage",
"createObjectMask": "Créer un masque d'objet",
"adjustAnnotationSettings": "Ajuster les paramètres d'annotation",
"scrollViewTips": "Cliquez pour voir les moments significatifs du cycle de vie de cet objet.",
"scrollViewTips": "Défilez pour voir les moments significatifs du cycle de vie de cet objet.",
"autoTrackingTips": "Les positions des cadres de détection seront imprécises pour les caméras à suivi automatique.",
"count": "{{first}} sur {{second}}",
"trackedPoint": "Point suivi",

View File

@ -1078,9 +1078,7 @@
"resolutionHigh": "La résolution {{resolution}} risque d'augmenter l'utilisation des ressources.",
"resolutionLow": "La résolution {{resolution}} risque d'être trop faible pour détecter les petits objets de manière fiable."
},
"valid": "Valide",
"ffmpegModule": "Utiliser le mode de compatibilité du flux",
"ffmpegModuleDescription": "Si le flux ne se charge pas après plusieurs tentatives, essayez d'activer cette option. Lorsqu'elle est activée, Frigate utilisera le module ffmpeg avec go2rtc. Cela peut offrir une meilleure compatibilité avec certains flux de caméra."
"valid": "Valide"
}
},
"cameraManagement": {

View File

@ -10,7 +10,6 @@
"unknownError": "Ismeretlen hiba. Ellenőrizze a naplókat.",
"webUnknownError": "Ismeretlen hiba. Ellenőrizze a konzol naplókat.",
"rateLimit": "Túl sokszor próbálkozott. Próbálja meg később."
},
"firstTimeLogin": "Először próbálsz bejelentkezni? A hitelesítési adatok a Frigate naplóiban vannak feltüntetve."
}
}
}

View File

@ -1,16 +1 @@
{
"documentTitle": "Osztályozási modellek",
"button": {
"deleteClassificationAttempts": "Osztályozási képek törlése",
"deleteImages": "Képek törlése",
"trainModel": "Modell betanítása",
"deleteModels": "Modellek törlése"
},
"toast": {
"success": {
"deletedImage": "Törölt képek",
"deletedModel": "Sikeresen törölt {{count}} modellt",
"categorizedImage": "A kép sikeresen osztályozva"
}
}
}
{}

View File

@ -36,6 +36,5 @@
"selected_one": "{{count}} kiválasztva",
"selected_other": "{{count}} kiválasztva",
"suspiciousActivity": "Gyanús Tevékenység",
"threateningActivity": "Fenyegető Tevékenység",
"zoomIn": "Nagyítás"
"threateningActivity": "Fenyegető Tevékenység"
}

View File

@ -13,11 +13,5 @@
"error": {
"renameExportFailed": "Sikertelen export átnevezés: {{errorMessage}}"
}
},
"tooltip": {
"downloadVideo": "Videó letöltése",
"editName": "Név szerkesztése",
"deleteExport": "Export törlése",
"shareExport": "Export megosztása"
}
}

View File

@ -42,12 +42,12 @@
"title": "Gyűjtemény létrehozása",
"desc": "Új gyűjtemény létrehozása",
"new": "Új arc létrhozása",
"nextSteps": "A jó alap készítéséhez:<li>Használja a Legutóbbi felismerések fület az egyes észlelt személyekhez tartozó képek kiválasztásához és betanításához.</li>A legjobb eredmény érdekében válassza az egyenesen előre néző arcokat ábrázoló képeket és kerülje a ferde szögből készült arcképeket a tanításhoz.</li></ul>"
"nextSteps": "A jó alap készítéséhez:<li>Használja a Tanítás fület az egyes észlelt személyekhez tartozó képek kiválasztására és betanítására.</li>A legjobb eredmény érdekében válassza az egyenesen előre néző arcokat ábrázoló képeket és kerülje a ferde szögből készült arcképeket a tanításhoz.</li></ul>"
},
"description": {
"placeholder": "Adj nevet ennek a gyűjteménynek",
"invalidName": "Nem megfelelő név. A nevek csak betűket, számokat, szóközöket, aposztrófokat, alulhúzásokat és kötőjeleket tartalmazhatnak.",
"addFace": "Adj hozzá egy új gyűjteményt az Arcképtárhoz az első képed feltöltésével."
"addFace": "Segédlet új gyűjtemény hozzáadásához az arckép könyvtárban."
},
"selectFace": "Arc kiválasztása",
"deleteFaceLibrary": {

View File

@ -297,8 +297,7 @@
},
"list": {
"two": "{{0}} e {{1}}",
"many": "{{items}}, e {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, e {{last}}"
},
"field": {
"optional": "Opzionale",

View File

@ -63,7 +63,7 @@
"label": "Cerca la fonte",
"desc": "Scegli se cercare nelle miniature o nelle descrizioni degli oggetti tracciati.",
"options": {
"thumbnailImage": "Immagine in miniatura",
"thumbnailImage": "Immagine anteprima",
"description": "Descrizione"
}
}

View File

@ -5,9 +5,7 @@
"renameCategory": "Rinomina classe",
"deleteCategory": "Elimina classe",
"deleteImages": "Elimina immagini",
"trainModel": "Modello di addestramento",
"addClassification": "Aggiungi classificazione",
"deleteModels": "Elimina modelli"
"trainModel": "Modello di addestramento"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Immagini eliminate",
"categorizedImage": "Immagine classificata con successo",
"trainedModel": "Modello addestrato con successo.",
"trainingModel": "Avviato con successo l'addestramento del modello.",
"deletedModel": "Eliminati con successo {{count}} modelli"
"trainingModel": "Avviato con successo l'addestramento del modello."
},
"error": {
"deleteImageFailed": "Impossibile eliminare: {{errorMessage}}",
"deleteCategoryFailed": "Impossibile eliminare la classe: {{errorMessage}}",
"categorizeFailed": "Impossibile categorizzare l'immagine: {{errorMessage}}",
"trainingFailed": "Impossibile avviare l'addestramento del modello: {{errorMessage}}",
"deleteModelFailed": "Impossibile eliminare il modello: {{errorMessage}}"
"trainingFailed": "Impossibile avviare l'addestramento del modello: {{errorMessage}}"
}
},
"deleteCategory": {
@ -137,14 +133,5 @@
},
"generateSuccess": "Immagini campione generate correttamente"
}
},
"deleteModel": {
"title": "Elimina modello di classificazione",
"single": "Vuoi davvero eliminare {{name}}? Questa operazione eliminerà definitivamente tutti i dati associati, comprese le immagini e i dati di allenamento. Questa azione non può essere annullata.",
"desc": "Vuoi davvero eliminare {{count}} modello/i? Questa operazione eliminerà definitivamente tutti i dati associati, comprese le immagini e i dati di addestramento. Questa azione non può essere annullata."
},
"menu": {
"objects": "Oggetti",
"states": "Stati"
}
}

View File

@ -158,8 +158,7 @@
"snapshot": "istantanea",
"object_lifecycle": "ciclo di vita dell'oggetto",
"details": "dettagli",
"video": "video",
"thumbnail": "miniatura"
"video": "video"
},
"itemMenu": {
"downloadSnapshot": {
@ -241,7 +240,7 @@
"noImageFound": "Nessuna immagine trovata per questo orario.",
"createObjectMask": "Crea maschera oggetto",
"adjustAnnotationSettings": "Regola le impostazioni di annotazione",
"scrollViewTips": "Clicca per visualizzare i momenti più significativi del ciclo di vita di questo oggetto.",
"scrollViewTips": "Scorri per visualizzare i momenti più significativi del ciclo di vita di questo oggetto.",
"autoTrackingTips": "Le posizioni dei riquadri di delimitazione saranno imprecise per le telecamere con tracciamento automatico.",
"count": "{{first}} di {{second}}",
"trackedPoint": "Punto tracciato",

View File

@ -294,7 +294,6 @@
},
"list": {
"two": "{{0}} og {{1}}",
"many": "{{items}}, og {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, og {{last}}"
}
}

View File

@ -214,8 +214,7 @@
"details": "detaljer",
"snapshot": "øyeblikksbilde",
"video": "video",
"object_lifecycle": "objektets livssyklus",
"thumbnail": "miniatyrbilde"
"object_lifecycle": "objektets livssyklus"
},
"dialog": {
"confirmDelete": {
@ -239,7 +238,7 @@
"noImageFound": "Ingen bilder funnet for dette tidsstempelet.",
"createObjectMask": "Opprett objektmaske",
"adjustAnnotationSettings": "Juster annoteringsinnstillinger",
"scrollViewTips": "Klikk for å se de viktige øyeblikkene i dette objektets livssyklus.",
"scrollViewTips": "Rull for å se de viktige øyeblikkene i dette objektets livssyklus.",
"autoTrackingTips": "Posisjonene til avgrensningsboksene vil være unøyaktige for kameraer med automatisk sporing.",
"count": "{{first}} av {{second}}",
"trackedPoint": "Sporet punkt",
@ -273,7 +272,7 @@
"millisecondsToOffset": "Antall millisekunder deteksjonsannoteringene skal forskyves med. <em>Standard: 0</em>",
"tips": "TIPS: Se for deg et hendelsesklipp med en person som går fra venstre mot høyre. Hvis avgrensningsboksen på tidslinjen for hendelsen konsekvent er til venstre for personen, bør verdien reduseres. På samme måte, hvis en person går fra venstre mot høyre og avgrensningsboksen konsekvent er foran personen, bør verdien økes.",
"toast": {
"success": "Annoteringsforskyvning for {{camera}} er lagret i konfigurasjonsfilen. Start Frigate på nytt for å aktivere endringene."
"success": "Annoteringsforskyvning for {{camera}} er lagret i konfigurasjonsfilen. Start Frigate på nytt for å ta i bruk endringene."
}
}
},

View File

@ -341,7 +341,7 @@
}
},
"toast": {
"success": "Sone ({{zoneName}}) er lagret. Start Frigate på nytt for å aktivere endringer."
"success": "Sone ({{zoneName}}) er lagret. Start Frigate på nytt for å bruke endringer."
}
},
"motionMasks": {
@ -1151,7 +1151,7 @@
"selectDetectionsZones": "Velg soner for deteksjoner",
"limitDetections": "Avgrens deteksjoner til bestemte soner",
"toast": {
"success": "Konfigurasjonen for inspeksjonsklassifisering er lagret. Start Frigate på nytt for å aktivere endringer."
"success": "Konfigurasjonen for inspeksjonsklassifisering er lagret. Start Frigate på nytt for å bruke endringer."
}
}
}

View File

@ -5,9 +5,7 @@
"renameCategory": "Gi nytt navn til kategori",
"deleteCategory": "Slett kategori",
"deleteImages": "Slett bilder",
"trainModel": "Tren modell",
"addClassification": "Legg til klassifisering",
"deleteModels": "Slett modeller"
"trainModel": "Tren modell"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Bilder slettet",
"categorizedImage": "Bildet ble klassifisert",
"trainedModel": "Modellen ble trent.",
"trainingModel": "Modelltrening startet.",
"deletedModel": "{{count}} modell(er) ble slettet"
"trainingModel": "Modelltrening startet."
},
"error": {
"deleteImageFailed": "Kunne ikke slette: {{errorMessage}}",
"deleteCategoryFailed": "Kunne ikke slette kategori: {{errorMessage}}",
"categorizeFailed": "Kunne ikke klassifisere bilde: {{errorMessage}}",
"trainingFailed": "Kunne ikke starte modelltrening: {{errorMessage}}",
"deleteModelFailed": "Kunne ikke slette modell: {{errorMessage}}"
"trainingFailed": "Kunne ikke starte modelltrening: {{errorMessage}}"
}
},
"deleteCategory": {
@ -103,8 +99,7 @@
"stateRequiresTwoClasses": "Tilstandsmodeller krever minst to kategorier",
"objectLabelRequired": "Velg en objektetikett",
"objectTypeRequired": "Velg en klassifiseringstype"
},
"states": "Tilstander"
}
},
"step2": {
"description": "Velg kameraer og definer området som skal overvåkes for hvert kamera. Modellen vil klassifisere tilstanden til disse områdene.",
@ -137,14 +132,5 @@
},
"generateSuccess": "Eksempelbilder ble generert"
}
},
"deleteModel": {
"title": "Slett klassifiseringsmodell",
"single": "Er du sikker på at du vil slette {{name}}? Dette vil permanent slette alle tilknyttede data, inkludert bilder og treningsdata. Denne handlingen kan ikke angres.",
"desc": "Er du sikker på at du vil slette {{count}} modell(er)? Dette vil permanent slette alle tilknyttede data, inkludert bilder og treningsdata. Denne handlingen kan ikke angres."
},
"menu": {
"objects": "Objekter",
"states": "Tilstander"
}
}

View File

@ -290,8 +290,7 @@
},
"list": {
"two": "{{0}} en {{1}}",
"many": "{{items}}, en {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, en {{last}}"
},
"field": {
"optional": "Optioneel",

View File

@ -5,9 +5,7 @@
"renameCategory": "Klasse hernoemen",
"deleteCategory": "Klasse verwijderen",
"deleteImages": "Afbeeldingen verwijderen",
"trainModel": "Model trainen",
"addClassification": "Classificatie toevoegen",
"deleteModels": "Modellen verwijderen"
"trainModel": "Model trainen"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Verwijderde afbeeldingen",
"categorizedImage": "Succesvol geclassificeerde afbeelding",
"trainedModel": "Succesvol getraind model.",
"trainingModel": "Modeltraining succesvol gestart.",
"deletedModel": "{{count}} model(len) succesvol verwijderd"
"trainingModel": "Modeltraining succesvol gestart."
},
"error": {
"deleteImageFailed": "Verwijderen mislukt: {{errorMessage}}",
"deleteCategoryFailed": "Het verwijderen van de klasse is mislukt: {{errorMessage}}",
"categorizeFailed": "Afbeelding categoriseren mislukt: {{errorMessage}}",
"trainingFailed": "Het starten van de modeltraining is mislukt: {{errorMessage}}",
"deleteModelFailed": "Model verwijderen mislukt: {{errorMessage}}"
"trainingFailed": "Het starten van de modeltraining is mislukt: {{errorMessage}}"
}
},
"deleteCategory": {
@ -137,14 +133,5 @@
},
"generateSuccess": "Met succes gegenereerde voorbeeldafbeeldingen"
}
},
"deleteModel": {
"title": "Classificatiemodel verwijderen",
"single": "Weet u zeker dat u {{name}} wilt verwijderen? Hiermee worden alle bijbehorende gegevens, inclusief afbeeldingen en trainingsgegevens, definitief verwijderd. Deze actie kan niet ongedaan worden gemaakt.",
"desc": "Weet u zeker dat u {{count}} model(len) wilt verwijderen? Hiermee worden alle bijbehorende gegevens, inclusief afbeeldingen en trainingsgegevens, permanent verwijderd. Deze actie kan niet ongedaan worden gemaakt."
},
"menu": {
"objects": "Objecten",
"states": "Staten"
}
}

View File

@ -33,8 +33,7 @@
"details": "Details",
"video": "video",
"snapshot": "snapshot",
"object_lifecycle": "objectlevenscyclus",
"thumbnail": "thumbnail"
"object_lifecycle": "objectlevenscyclus"
},
"objectLifecycle": {
"createObjectMask": "Objectmasker maken",
@ -239,7 +238,7 @@
"noImageFound": "Er is geen afbeelding beschikbaar voor dit tijdstip.",
"createObjectMask": "Objectmasker maken",
"adjustAnnotationSettings": "Annotatie-instellingen aanpassen",
"scrollViewTips": "Klik om de belangrijke momenten uit de levenscyclus van dit object te bekijken.",
"scrollViewTips": "Scroll om de belangrijke momenten uit de levenscyclus van dit object te bekijken.",
"autoTrackingTips": "Als u een automatische objectvolgende camera gebruikt, zal het objectkader onnauwkeurig zijn.",
"count": "{{first}} van {{second}}",
"trackedPoint": "Volgpunt",

View File

@ -128,7 +128,7 @@
"documentation": "Lees de documentatie ",
"title": "Audio moet via je camera komen en in go2rtc geconfigureerd zijn voor deze stream."
},
"unavailable": "Audio is niet beschikbaar voor deze stream",
"unavailable": "Audio is niet beschikbaar voor deze stroom",
"available": "Audio is beschikbaar voor deze stream"
},
"playInBackground": {

View File

@ -1075,9 +1075,7 @@
},
"resolutionHigh": "Een resolutie van {{resolution}} kan leiden tot een verhoogd gebruik van systeembronnen.",
"resolutionLow": "Een resolutie van {{resolution}} kan te laag zijn voor betrouwbare detectie van kleine objecten."
},
"ffmpegModule": "Gebruik stream-compatibiliteitsmodus",
"ffmpegModuleDescription": "Als de stream na meerdere pogingen niet wordt geladen, probeer dit dan in te schakelen. Wanneer deze optie is ingeschakeld, gebruikt Frigate de ffmpeg-module samen met go2rtc. Dit kan zorgen voor een betere compatibiliteit met sommige camerastreams."
}
}
},
"cameraManagement": {

View File

@ -43,16 +43,10 @@
"trackedObject_one": "objeto",
"trackedObject_other": "objetos",
"noObjectDetailData": "Nenhum dado de detalhe de objeto disponível.",
"label": "Detalhe",
"settings": "Configurações de visualização detalhada",
"alwaysExpandActive": {
"title": "Expandir sempre o modo ativo"
}
"label": "Detalhe"
},
"objectTrack": {
"trackedPoint": "Ponto rastreado",
"clickToSeek": "Clique para ir para esse horário"
},
"zoomIn": "Ampliar",
"zoomOut": "Diminuir o zoom"
}
}

View File

@ -111,8 +111,7 @@
"details": "detalhes",
"snapshot": "captura de imagem",
"video": "vídeo",
"object_lifecycle": "ciclo de vida do objeto",
"thumbnail": "thumbnail"
"object_lifecycle": "ciclo de vida do objeto"
},
"objectLifecycle": {
"title": "Ciclo de Vida do Objeto",

View File

@ -5,9 +5,7 @@
"renameCategory": "Renomear Classe",
"deleteCategory": "Apagar Classe",
"deleteImages": "Apagar Imagens",
"trainModel": "Treinar Modelo",
"addClassification": "Adicionar classificação",
"deleteModels": "Excluir modelos"
"trainModel": "Treinar Modelo"
},
"toast": {
"success": {
@ -15,23 +13,12 @@
"deletedImage": "Imagens Apagadas",
"categorizedImage": "Imagem Classificada com Sucesso",
"trainedModel": "Modelo treinado com sucesso.",
"trainingModel": "Treinamento do modelo iniciado com sucesso.",
"deletedModel": "Modelo(s) {{count}} excluído(s) com sucesso"
"trainingModel": "Treinamento do modelo iniciado com sucesso."
},
"error": {
"deleteImageFailed": "Falha ao deletar:{{errorMessage}}",
"deleteCategoryFailed": "Falha ao deletar classe:{{errorMessage}}",
"categorizeFailed": "Falha ao categorizar imagem:{{errorMessage}}",
"deleteModelFailed": "Falha ao excluir o modelo: {{errorMessage}}",
"trainingFailed": "Falha ao iniciar o treinamento do modelo: {{errorMessage}}"
"categorizeFailed": "Falha ao categorizar imagem:{{errorMessage}}"
}
},
"deleteCategory": {
"title": "Excluir Classe",
"desc": "Tem certeza de que deseja excluir a classe {{name}}? Isso excluirá permanentemente todas as imagens associadas e exigirá o treinamento do modelo novamente."
},
"deleteModel": {
"title": "Deletar modelo de classificação",
"single": "Tem certeza de que deseja excluir {{name}}? Isso excluirá permanentemente todos os dados associados, incluindo imagens e dados de treinamento. Esta ação não pode ser desfeita."
}
}

View File

@ -287,8 +287,7 @@
},
"list": {
"two": "{{0}} și {{1}}",
"many": "{{items}}, și {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, și {{last}}"
},
"field": {
"optional": "Opțional",

View File

@ -5,9 +5,7 @@
"renameCategory": "Redenumește clasa",
"deleteCategory": "Șterge clasa",
"deleteImages": "Șterge imaginile",
"trainModel": "Antrenează modelul",
"addClassification": "Adaugă clasificare",
"deleteModels": "Șterge modelele"
"trainModel": "Antrenează modelul"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Imagini șterse",
"categorizedImage": "Imagine clasificată cu succes",
"trainedModel": "Model antrenat cu succes.",
"trainingModel": "Antrenamentul modelului a fost pornit cu succes.",
"deletedModel": "{{count}} model(e) șters(e) cu succes"
"trainingModel": "Antrenamentul modelului a fost pornit cu succes."
},
"error": {
"deleteImageFailed": "Ștergerea a eșuat: {{errorMessage}}",
"deleteCategoryFailed": "Ștergerea clasei a eșuat: {{errorMessage}}",
"categorizeFailed": "Categorisirea imaginii a eșuat: {{errorMessage}}",
"trainingFailed": "Pornirea antrenamentului modelului a eșuat: {{errorMessage}}",
"deleteModelFailed": "Ștergerea modelului a eșuat: {{errorMessage}}"
"trainingFailed": "Pornirea antrenamentului modelului a eșuat: {{errorMessage}}"
}
},
"deleteCategory": {
@ -103,8 +99,7 @@
"stateRequiresTwoClasses": "Modelele de stare necesită cel puțin 2 clase",
"objectLabelRequired": "Vă rugăm să selectați o etichetă de obiect",
"objectTypeRequired": "Vă rugăm să selectați un tip de clasificare"
},
"states": "Stări"
}
},
"step2": {
"description": "Selectați camerele și definiți zona de monitorizat pentru fiecare cameră. Modelul va clasifica starea acestor zone.",
@ -137,14 +132,5 @@
},
"generateSuccess": "Imaginile de exemplu au fost generate cu succes"
}
},
"deleteModel": {
"title": "Șterge modelul de clasificare",
"single": "Sigur doriți să ștergeți {{name}}? Aceasta va șterge permanent toate datele asociate, inclusiv imaginile și datele de antrenament. Această acțiune nu poate fi anulată.",
"desc": "Sigur doriți să ștergeți {{count}} model(e)? Aceasta va șterge permanent toate datele asociate, inclusiv imaginile și datele de antrenament. Această acțiune nu poate fi anulată."
},
"menu": {
"objects": "Obiecte",
"states": "Stări"
}
}

View File

@ -33,8 +33,7 @@
"details": "detalii",
"snapshot": "snapshot",
"video": "video",
"object_lifecycle": "ciclul de viață al obiectului",
"thumbnail": "miniatură"
"object_lifecycle": "ciclul de viață al obiectului"
},
"objectLifecycle": {
"lifecycleItemDesc": {
@ -241,7 +240,7 @@
"noImageFound": "Nu s-a găsit nicio imagine pentru acest marcaj de timp.",
"createObjectMask": "Creează Masca Obiectului",
"adjustAnnotationSettings": "Ajustează Setările de anotare",
"scrollViewTips": "Apasă pentru a vizualiza momentele semnificative din ciclul de viață al acestui obiect.",
"scrollViewTips": "Derulează pentru a vizualiza momentele semnificative din ciclul de viață al acestui obiect.",
"autoTrackingTips": "Pozițiile casetelor de delimitare vor fi inexacte pentru camerele cu urmărire automată.",
"count": "{{first}} din {{second}}",
"trackedPoint": "Punct Urmărit",

View File

@ -50,10 +50,6 @@
"playAlertVideos": {
"label": "Redă videoclipurile de alertă",
"desc": "În mod implicit, alertele recente din panoul Live se redau ca videoclipuri mici, ce ruleaza repetat. Dezactivează această opțiune pentru a afișa doar o imagine statică a alertelor recente pe acest dispozitiv/browser."
},
"displayCameraNames": {
"label": "Afișează întotdeauna numele camerelor",
"desc": "Afișează întotdeauna numele camerelor într-un indicator în tabloul de bord cu vizualizare live pe mai multe camere."
}
},
"storedLayouts": {
@ -707,9 +703,7 @@
},
"actions": {
"alert": "Marchează ca alertă",
"notification": "Trimite notificare",
"sub_label": "Adaugă subeticheta",
"attribute": "Adaugă atribut"
"notification": "Trimite notificare"
},
"dialog": {
"createTrigger": {
@ -764,7 +758,7 @@
},
"actions": {
"title": "Acțiuni",
"desc": "În mod implicit, Frigate trimite un mesaj MQTT pentru toate declanșatoarele. Subetichetele adaugă numele declanșatorului la eticheta obiectului. Atributele sunt metadate căutabile, stocate separat în metadatele obiectului urmărit.",
"desc": "Implicit, Frigate trimite un mesaj MQTT pentru toate declanșatoarele. Alegeți o acțiune suplimentară de efectuat atunci când acest declanșator se activează.",
"error": {
"min": "Trebuie selectată cel puțin o acțiune."
}
@ -1014,9 +1008,7 @@
},
"resolutionHigh": "O rezoluție de {{resolution}} poate cauza o utilizare crescută a resurselor.",
"resolutionLow": "O rezoluție de {{resolution}} poate fi prea mică pentru detectarea fiabilă a obiectelor mici."
},
"ffmpegModule": "Folosește modul de compatibilitate pentru stream-uri",
"ffmpegModuleDescription": "Dacă fluxul nu se încarcă după mai multe încercări, activați această opțiune. Când este activată, Frigate va folosi modulul ffmpeg împreună cu go2rtc. Aceasta poate oferi o compatibilitate mai bună cu unele fluxuri de camere."
}
}
},
"cameraManagement": {

View File

@ -287,8 +287,7 @@
},
"list": {
"two": "{{0}} a {{1}}",
"many": "{{items}}, a {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, a {{last}}"
},
"field": {
"optional": "Voliteľné",

View File

@ -5,9 +5,7 @@
"renameCategory": "Premenovať triedu",
"deleteCategory": "Odstrániť triedu",
"deleteImages": "Odstrániť obrázky",
"trainModel": "Model vlaku",
"addClassification": "Pridať klasifikáciu",
"deleteModels": "Odstrániť modely"
"trainModel": "Model vlaku"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Vymazané obrázky",
"categorizedImage": "Obrázok bol úspešne klasifikovaný",
"trainedModel": "Úspešne vyškolený model.",
"trainingModel": "Úspešne spustený modelový tréning.",
"deletedModel": "Úspešne zmazané {{count}} model (y)"
"trainingModel": "Úspešne spustený modelový tréning."
},
"error": {
"deleteImageFailed": "Nepodarilo sa odstrániť: {{errorMessage}}",
"deleteCategoryFailed": "Nepodarilo sa odstrániť triedu: {{errorMessage}}",
"categorizeFailed": "Nepodarilo sa kategorizovať obrázok: {{errorMessage}}",
"trainingFailed": "Nepodarilo sa spustiť trénovanie modelu: {{errorMessage}}",
"deleteModelFailed": "Nepodarilo sa odstrániť model: {{errorMessage}}"
"trainingFailed": "Nepodarilo sa spustiť trénovanie modelu: {{errorMessage}}"
}
},
"deleteCategory": {
@ -103,8 +99,7 @@
"stateRequiresTwoClasses": "Modely štátov vyžadujú aspoň 2 triedy",
"objectLabelRequired": "Vyberte označenie objektu",
"objectTypeRequired": "Vyberte typ klasifikácie"
},
"states": "Štátov"
}
},
"step2": {
"description": "Vyberte kamery a definujte oblasť, ktorú chcete pre každú kameru monitorovať. Model klasifikuje stav týchto oblastí.",
@ -137,14 +132,5 @@
},
"generateSuccess": "Vzorové obrázky boli úspešne vygenerované"
}
},
"deleteModel": {
"title": "Odstrániť klasifikačný model",
"single": "Ste si istí, že chcete odstrániť {{name}}? To bude trvalo odstrániť všetky súvisiace údaje vrátane obrázkov a vzdelávacích údajov. Táto akcia nemôže byť neporušená.",
"desc": "Ste si istí, že chcete odstrániť {{count}} model (y)? To bude trvalo odstrániť všetky súvisiace údaje vrátane obrázkov a vzdelávacích údajov. Táto akcia nemôže byť neporušená."
},
"menu": {
"objects": "Objekty",
"states": "Štátov"
}
}

View File

@ -111,8 +111,7 @@
"details": "detaily",
"snapshot": "snímka",
"video": "video",
"object_lifecycle": "životný cyklus objektu",
"thumbnail": "Náhľad"
"object_lifecycle": "životný cyklus objektu"
},
"objectLifecycle": {
"title": "Životný cyklus Objektu",
@ -241,7 +240,7 @@
"noImageFound": "Pre túto časovú pečiatku sa nenašiel žiadny obrázok.",
"createObjectMask": "Vytvoriť masku objektu",
"adjustAnnotationSettings": "Upravte nastavenia anotácií",
"scrollViewTips": "Kliknite pre zobrazenie významných momentov životného cyklu tohto objektu.",
"scrollViewTips": "Posúvaním zobrazíte významné momenty životného cyklu tohto objektu.",
"autoTrackingTips": "Pozície ohraničujúcich rámčekov budú pre kamery s automatickým sledovaním nepresné.",
"count": "{{first}} z {{second}}",
"trackedPoint": "Sledovaný bod",

View File

@ -1,6 +1,6 @@
{
"description": {
"addFace": "Sprievodca pridaním novej kolekcie do Knižnice tvárí.",
"addFace": "Sprievodca pridáním novej kolekcie do Knižnice tvárí.",
"invalidName": "Neplatné meno. Mená môžu obsahovať iba písmená, čísla, medzery, apostrofy, podčiarkovníky a spojovníky.",
"placeholder": "Zadajte názov pre túto kolekciu"
},
@ -23,7 +23,7 @@
"title": "Vytvoriť Zbierku",
"desc": "Vytvoriť novú zbierku",
"new": "Vytvoriť novú tvár",
"nextSteps": "Vybudovanie silného základu:<li>Použite kartu Nedávne rozpoznania na výber a trénovanie obrázkov pre každú rozpoznanú osobu.</li><li>Pre dosiahnutie najlepších výsledkov sa zamerajte na priame obrázky; vyhnite sa trénovaniu obrázkov, ktoré zachytávajú tváre pod uhlom.</li></ul>"
"nextSteps": "Vybudovanie pevných základov: <li> Pomocou záložky Tréning vyberte a trénujte obrázky pre každú detekovanú osobu.</li><li>Pre dosiahnutie najlepších výsledkov sa zamerajte na snímky s priamym pohľadom; vyhnite sa snímkam, ktoré zachytávajú tváre pod uhlom.</li></ul>"
},
"steps": {
"faceName": "Zadajte Meno tváre",

View File

@ -922,157 +922,7 @@
},
"snapshotConfig": {
"title": "Konfigurácia snímky",
"desc": "Odosielanie do Frigate+ vyžaduje, aby boli v konfigurácii povolené snímky aj snímky <code>clean_copy</code>.",
"cleanCopyWarning": "Niektoré kamery majú povolené snímky, ale voľba <code>clean_copy</code> je zakázaná. Pre možnosť odosielania snímok z týchto kamier do služby Frigate+ je nutné túto voľbu povoliť v konfigurácii snímok.",
"table": {
"camera": "Kamera",
"snapshots": "Snímky",
"cleanCopySnapshots": "<code>clean_copy</code> Snímky"
}
},
"modelInfo": {
"title": "Informácie o Modele",
"modelType": "Typ Modelu",
"trainDate": "Dátum Tréningu",
"baseModel": "Základný Model",
"plusModelType": {
"baseModel": "Základný Model",
"userModel": "Doladené"
},
"supportedDetectors": "Podporované Detektory",
"cameras": "Kamery",
"loading": "Načítavam informácie o modeli…",
"error": "Chyba načítania informácií o modeli",
"availableModels": "Dostupné Moduly",
"loadingAvailableModels": "Načítavam dostupné modely…",
"modelSelect": "Tu môžete vybrať dostupné modely zo služby Frigate+. Upozorňujeme, že je možné zvoliť iba modely kompatibilné s aktuálnou konfiguráciou detektora."
},
"unsavedChanges": "Neuložené zmeny nastavenia Frigate+",
"restart_required": "Vyžadovaný reštart (model Frigate+ zmenený)",
"toast": {
"success": "Nastavenia Frigate+ boli uložené. Reštartujte Frigate+ pre aplikovanie zmien.",
"error": "Chyba pri ukladaní zmien konfigurácie: {{errorMessage}}"
}
},
"triggers": {
"documentTitle": "Spúšťače",
"semanticSearch": {
"title": "Sémantické vyhľadávanie je vypnuté",
"desc": "Na používanie spúšťačov musí byť povolené sémantické vyhľadávanie."
},
"management": {
"title": "Spúšťače",
"desc": "Správa spúšťa {{camera}}. Použite typ miniatúry, aby ste spustili na podobných miniatúr na vybraných tracked objekt, a typ popisu, aby ste spustili podobné popisy na text, ktorý určíte."
},
"addTrigger": "Pridať Spúšťač",
"table": {
"name": "Meno",
"type": "Typ",
"content": "Obsah",
"threshold": "Prah",
"actions": "Akcie",
"noTriggers": "Pre túto kameru nie sú nakonfigurované žiadne spúšťače.",
"edit": "Upraviť",
"deleteTrigger": "Odstrániť spúšťač",
"lastTriggered": "Naposledy spustené"
},
"type": {
"thumbnail": "Náhľad",
"description": "Popis"
},
"actions": {
"notification": "Poslať upozornenie",
"sub_label": "Pridať vedľajší štítok",
"attribute": "Pridať atribút"
},
"dialog": {
"createTrigger": {
"title": "Vytvoriť spúšťač",
"desc": "Vytvorte spúšť pre kameru {{camera}}"
},
"editTrigger": {
"title": "Upraviť spúšťač",
"desc": "Upraviť nastavenia spúšťača na kamere {{camera}}"
},
"deleteTrigger": {
"title": "Odstrániť spúšťač",
"desc": "Naozaj chcete odstrániť spúšťač <strong>{{triggerName}}</strong>? Túto akciu nie je možné vrátiť späť."
},
"form": {
"name": {
"title": "Meno",
"placeholder": "Zadajte meno pre spúšťača",
"description": "Zadajte jedinečné meno alebo popis na identifikáciu tohto spúšťania",
"error": {
"minLength": "Názov musí mať aspoň 2 znaky.",
"invalidCharacters": "Meno môže obsahovať iba písmená, číslice, podčiarkovníky a pomlčky.",
"alreadyExists": "Spúšťač s týmto názvom už pre túto kameru existuje."
}
},
"enabled": {
"description": "Povoliť alebo zakázať tento spúšťač"
},
"type": {
"title": "Typ",
"placeholder": "Vybrať typ spúšťača",
"description": "Spustiť, keď sa zistí podobný popis sledovaného objektu",
"thumbnail": "Spustiť, keď sa zistí podobná miniatúra sledovaného objektu"
},
"content": {
"title": "Obsah",
"imagePlaceholder": "Vyberte miniatúru",
"textPlaceholder": "Zadajte obsah textu",
"imageDesc": "Zobrazujú sa iba posledné 100 miniatúr. Ak nemôžete nájsť požadovanú miniatúru, prečítajte si skôr objekty v preskúmať a nastaviť spúšťací z ponuky tam.",
"textDesc": "Zadajte text, aby ste spustili túto akciu, keď je detekovaný podobný popis objektu.",
"error": {
"required": "Obsah je potrebný."
}
},
"threshold": {
"title": "Prah",
"desc": "Nastavte prah podobnosti pre tento spúšťač. Vyšší prah znamená, že na spustenie spúšťača je potrebná bližšia zhoda.",
"error": {
"min": "Threshold musí byť aspoň 0",
"max": "Threshold musí byť na väčšine 1"
}
},
"actions": {
"title": "Akcie",
"desc": "V predvolenom nastavení Frigate odosiela MQTT správu pre všetky spúšťače. Zvoľte dodatočnú akciu, ktorá sa má vykonať, keď sa tento spúšťač aktivuje.",
"error": {
"min": "Musí byť vybraná aspoň jedna akcia."
}
}
}
},
"wizard": {
"title": "Vytvoriť spúšťač",
"step1": {
"description": "Konfigurujte základné nastavenia pre vašu spúšť."
},
"step2": {
"description": "Nastavte obsah, ktorý spustí túto akciu."
},
"step3": {
"description": "Konfigurovať prah a akcie pre tento spúšťač."
},
"steps": {
"nameAndType": "Meno a typ",
"configureData": "Konfigurovať údaje",
"thresholdAndActions": "Prah a akcie"
}
},
"toast": {
"success": {
"createTrigger": "Spúšťač {{name}} bol úspešne vytvorený.",
"updateTrigger": "Spúšťač {{name}} bol úspešne aktualizovaný.",
"deleteTrigger": "Spúšťač {{name}} bol úspešne zmazaný."
},
"error": {
"createTriggerFailed": "Nepodarilo sa vytvoriť spúšťač: {{errorMessage}}",
"updateTriggerFailed": "Nepodarilo sa aktualizovať spúšťač: {{errorMessage}}",
"deleteTriggerFailed": "Nepodarilo sa zmazať spúšťač: {{errorMessage}}"
}
"desc": "Odosielanie do Frigate+ vyžaduje, aby boli v konfigurácii povolené snímky aj snímky <code>clean_copy</code>."
}
}
}

View File

@ -250,10 +250,7 @@
"copyUrlToClipboard": "Webbadressen har kopierats till urklipp."
},
"label": {
"back": "Gå tillbaka",
"hide": "Dölj {{item}}",
"show": "Visa {{item}}",
"ID": "ID"
"back": "Gå tillbaka"
},
"unit": {
"speed": {
@ -277,14 +274,5 @@
"readTheDocumentation": "Läs dokumentationen",
"information": {
"pixels": "{{area}}px"
},
"list": {
"two": "{{0}} och {{1}}",
"many": "{{items}} och {{last}}",
"separatorWithSpace": ", "
},
"field": {
"optional": "Valfritt",
"internalID": "Det interna ID som Frigate använder i konfigurationen och databasen"
}
}

View File

@ -5,9 +5,7 @@
"renameCategory": "Byt namn på klass",
"deleteCategory": "Ta bort klass",
"deleteImages": "Ta bort bilder",
"trainModel": "Träna modellen",
"addClassification": "Lägg till klassificering",
"deleteModels": "Ta bort modeller"
"trainModel": "Träna modellen"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Raderade bilder",
"categorizedImage": "Lyckades klassificera bilden",
"trainedModel": "Modellen har tränats.",
"trainingModel": "Modellträning har startat.",
"deletedModel": "{{count}} modell(er) har raderats"
"trainingModel": "Modellträning har startat."
},
"error": {
"deleteImageFailed": "Misslyckades med att ta bort: {{errorMessage}}",
"deleteCategoryFailed": "Misslyckades med att ta bort klassen: {{errorMessage}}",
"categorizeFailed": "Misslyckades med att kategorisera bilden: {{errorMessage}}",
"trainingFailed": "Misslyckades med att starta modellträning: {{errorMessage}}",
"deleteModelFailed": "Misslyckades med att ta bort modellen: {{errorMessage}}"
"trainingFailed": "Misslyckades med att starta modellträning: {{errorMessage}}"
}
},
"deleteCategory": {
@ -47,8 +43,7 @@
},
"train": {
"title": "Nyligen tillagd klassificeringar",
"aria": "Välj senaste klassificeringar",
"titleShort": "Nyligen"
"aria": "Välj senaste klassificeringar"
},
"categories": "Klasser",
"createCategory": {
@ -87,64 +82,7 @@
"classificationType": "Klassificeringstyp",
"classificationTypeTip": "Lär dig mer om klassificeringstyper",
"classificationTypeDesc": "Underetiketter lägger till ytterligare text till objektetiketten (t.ex. 'Person: UPS'). Attribut är sökbara metadata som lagras separat i objektmetadata.",
"classificationSubLabel": "Underetikett",
"classificationAttribute": "Attribut",
"classes": "Klasser",
"states": "Tillstånd",
"classesTip": "Lär dig mer om klasser",
"classesStateDesc": "Definiera de olika tillstånd som ditt kameraområde kan vara i. Till exempel: \"öppen\" och \"stängd\" för en garageport.",
"classesObjectDesc": "Definiera de olika kategorierna som detekterade objekt ska klassificeras i. Till exempel: 'leveransperson', 'boende', 'främling' för personklassificering.",
"classPlaceholder": "Ange klassnamn...",
"errors": {
"nameRequired": "Modellnamn krävs",
"nameLength": "Modellnamnet måste vara högst 64 tecken långt",
"nameOnlyNumbers": "Modellnamnet får inte bara innehålla siffror",
"classRequired": "Minst 1 klass krävs",
"classesUnique": "Klassnamn måste vara unika",
"stateRequiresTwoClasses": "Tillståndsmodeller kräver minst två klasser",
"objectLabelRequired": "Välj en objektetikett",
"objectTypeRequired": "Vänligen välj en klassificeringstyp"
}
},
"step2": {
"description": "Välj kameror och definiera området som ska övervakas för varje kamera. Modellen kommer att klassificera tillståndet för dessa områden.",
"cameras": "Kameror",
"selectCamera": "Välj kamera",
"noCameras": "Klicka på + för att lägga till kameror",
"selectCameraPrompt": "Välj en kamera från listan för att definiera dess övervakningsområde"
},
"step3": {
"selectImagesPrompt": "Markera alla bilder med: {{className}}",
"selectImagesDescription": "Klicka på bilderna för att välja dem. Klicka på Fortsätt när du är klar med den här klass.",
"generating": {
"title": "Generera exempelbilder",
"description": "Frigate hämtar representativa bilder från dina inspelningar. Det kan ta en stund..."
},
"training": {
"title": "Träningsmodell",
"description": "Din modell tränas i bakgrunden. Stäng den här dialogrutan så börjar modellen köras så snart träningen är klar."
},
"retryGenerate": "Försök att generera igen",
"noImages": "Inga exempelbilder genererade",
"classifying": "Klassificering & Träning...",
"trainingStarted": "Träningen har börjat",
"errors": {
"noCameras": "Inga kameror konfigurerade",
"noObjectLabel": "Ingen objektetikett vald",
"generateFailed": "Misslyckades med att generera exempel: {{error}}",
"generationFailed": "Genereringen misslyckades. Försök igen.",
"classifyFailed": "Misslyckades med att klassificera bilder: {{error}}"
},
"generateSuccess": "Exempelbilder har genererats"
"classificationSubLabel": "Underetikett"
}
},
"deleteModel": {
"title": "Ta bort klassificeringsmodell",
"single": "Är du säker på att du vill ta bort {{name}}? Detta kommer att permanent ta bort all tillhörande data, inklusive bilder och träningsdata. Åtgärden kan inte ångras.",
"desc": "Är du säker på att du vill ta bort {{count}} modell(er)? Detta kommer att permanent ta bort all tillhörande data, inklusive bilder och träningsdata. Åtgärden kan inte ångras."
},
"menu": {
"objects": "Objekt",
"states": "Tillstånd"
}
}

View File

@ -43,17 +43,10 @@
"trackedObject_one": "objekt",
"trackedObject_other": "objekt",
"noObjectDetailData": "Inga objektdetaljdata tillgängliga.",
"label": "Detalj",
"settings": "Detaljvy inställningar",
"alwaysExpandActive": {
"title": "Expandera alltid aktivt",
"desc": "Expandera alltid objektinformationen för det aktiva granskningsobjektet när den är tillgänglig."
}
"label": "Detalj"
},
"objectTrack": {
"trackedPoint": "Spårad punkt",
"clickToSeek": "Klicka för att söka till den här tiden"
},
"zoomIn": "Zooma in",
"zoomOut": "Zooma ut"
}
}

View File

@ -109,8 +109,7 @@
"details": "detaljer",
"video": "video",
"snapshot": "ögonblicksbild",
"object_lifecycle": "objektets livscykel",
"thumbnail": "miniatyrbild"
"object_lifecycle": "objektets livscykel"
},
"trackedObjectDetails": "Detaljer om spårade objekt",
"objectLifecycle": {
@ -200,13 +199,6 @@
},
"showObjectDetails": {
"label": "Visa objektets plats"
},
"viewTrackingDetails": {
"label": "Visa spårningsinformation",
"aria": "Visa spårningsdetaljerna"
},
"hideObjectDetails": {
"label": "Dölj objektsökväg"
}
},
"dialog": {
@ -239,7 +231,7 @@
"noImageFound": "Ingen bild hittades för denna tidsstämpel.",
"createObjectMask": "Skapa objektmask",
"adjustAnnotationSettings": "Justera annoteringsinställningar",
"scrollViewTips": "Klicka för att se de viktiga ögonblicken i detta objekts livscykel.",
"scrollViewTips": "Scrolla för att se de viktiga ögonblicken i detta objekts livscykel.",
"autoTrackingTips": "Begränsningsrutornas positioner kommer att vara felaktiga för autospårningskameror.",
"count": "{{first}} av {{second}}",
"trackedPoint": "Spårad punkt",
@ -269,7 +261,7 @@
},
"offset": {
"label": "Annoteringsförskjutning",
"desc": "Denna data kommer från din kameras detekteringsflöde men läggs ovanpå bilder från inspelningsflödet. Det är osannolikt att de två strömmarna är helt synkroniserade. Som ett resultat kommer avgränsningsramen och filmmaterialet inte att radas upp perfekt. Du kan använda den här inställningen för att förskjuta anteckningarna framåt eller bakåt i tiden för att bättre anpassa dem till det inspelade materialet.",
"desc": "Denna data kommer från din kameras detekteringsflöde men läggs ovanpå bilder från inspelningsflödet. Det är osannolikt att de två strömmarna är helt synkroniserade. Som ett resultat kommer avgränsningsramen och filmmaterialet inte att radas upp perfekt. Fältet <code>annotation_offset</code> kan dock användas för att justera detta.",
"millisecondsToOffset": "Millisekunder för att förskjuta detektera annoteringar med. <em>Standard: 0</em>",
"tips": "TIPS: Föreställ dig ett händelseklipp med en person som går från vänster till höger. Om tidslinjens avgränsningsram konsekvent är till vänster om personen bör värdet minskas. På samma sätt, om en person går från vänster till höger och avgränsningsramen konsekvent är framför personen bör värdet ökas.",
"toast": {
@ -278,8 +270,7 @@
}
},
"carousel": {
"previous": "Föregående bild",
"next": "Nästa bild"
"previous": "Föregående bild"
}
}
}

View File

@ -25,11 +25,7 @@
"desc": "Som standard visas varningar på Live panelen som små loopande klipp. Inaktivera denna inställning för att bara visa en statisk bild av nya varningar på denna enhet/webbläsare.",
"label": "Spela upp Varnings videor"
},
"title": "Live Panel",
"displayCameraNames": {
"label": "Visa alltid kameranamn",
"desc": "Visa alltid kameranamnen i ett chip i instrumentpanelen för livevisning med flera kameror."
}
"title": "Live Panel"
},
"storedLayouts": {
"title": "Sparade Layouter",
@ -735,7 +731,7 @@
"triggers": {
"documentTitle": "Utlösare",
"management": {
"title": "Utlösare",
"title": "Utlösare hantering",
"desc": "Hantera utlösare för {{camera}}. Använd miniatyrtypen för att utlösa liknande miniatyrer som ditt valda spårade objekt och beskrivningstypen för att utlösa liknande beskrivningar av text du anger."
},
"addTrigger": "Lägg till utlösare",
@ -756,9 +752,7 @@
},
"actions": {
"notification": "Skicka avisering",
"alert": "Markera som Varning",
"sub_label": "Lägg till underetikett",
"attribute": "Lägg till attribut"
"alert": "Markera som Varning"
},
"dialog": {
"createTrigger": {
@ -776,28 +770,25 @@
"form": {
"name": {
"title": "Namn",
"placeholder": "Namnge denna utlösare",
"placeholder": "Ange utlösarens namn",
"error": {
"minLength": "Fältet måste vara minst 2 tecken långt.",
"invalidCharacters": "Fältet får bara innehålla bokstäver, siffror, understreck och bindestreck.",
"minLength": "Namnet måste vara minst 2 tecken lång.",
"invalidCharacters": "Namnet får bara innehålla bokstäver, siffror, understreck, och bindestreck.",
"alreadyExists": "En utlösare med detta namn finns redan för den här kameran."
},
"description": "Ange ett unikt namn eller en unik beskrivning för att identifiera den här utlösaren"
}
},
"enabled": {
"description": "Aktivera eller inaktivera den här utlösaren"
},
"type": {
"title": "Typ",
"placeholder": "Välj utlösartyp",
"description": "Utlöses när en liknande beskrivning av spårat objekt detekteras",
"thumbnail": "Utlöses när en liknande miniatyrbild av ett spårat objekt upptäcks"
"placeholder": "Välj utlösartyp"
},
"content": {
"title": "Innehåll",
"imagePlaceholder": "Välj en miniatyrbild",
"imagePlaceholder": "Välj en bild",
"textPlaceholder": "Ange textinnehåll",
"imageDesc": "Endast de senaste 100 miniatyrerna visas. Om du inte hittar önskad miniatyr kan du granska tidigare objekt i Utforska och skapa en utlösare från menyn där.",
"imageDesc": "Välj en bild för att utlösa den här åtgärden när en liknande bild upptäcks.",
"textDesc": "Ange text för att utlösa den här åtgärden när en liknande beskrivning av spårat objekt upptäcks.",
"error": {
"required": "Innehåll krävs."
@ -808,12 +799,11 @@
"error": {
"min": "Tröskelvärdet måste vara minst 0",
"max": "Tröskelvärdet får vara högst 1"
},
"desc": "Ställ in likhetströskeln för denna utlösare. En högre tröskel innebär att en bättre matchning krävs för att utlösaren ska aktiveras."
}
},
"actions": {
"title": "Åtgärder",
"desc": "Som standard utlöser Frigate ett MQTT-meddelande för alla utlösare. Underetiketter lägger till utlösarnamnet till objektetiketten. Attribut är sökbara metadata som lagras separat i de spårade objektmetadata.",
"desc": "Som standard utlöser Frigate ett MQTT-meddelande för alla utlösare. Välj en ytterligare åtgärd att utföra när den här utlösaren utlöses.",
"error": {
"min": "Minst en åtgärd måste väljas."
}
@ -840,23 +830,6 @@
"semanticSearch": {
"title": "Semantisk sökning är inaktiverad",
"desc": "Semantisk sökning måste vara aktiverad för att använda Utlösare."
},
"wizard": {
"title": "Skapa utlösare",
"step1": {
"description": "Konfigurera grundinställningarna för din trigger."
},
"step2": {
"description": "Ställ in innehållet som ska utlösa den här åtgärden."
},
"step3": {
"description": "Konfigurera tröskelvärdet och åtgärderna för den här utlösaren."
},
"steps": {
"nameAndType": "Namn och typ",
"configureData": "Konfigurera data",
"thresholdAndActions": "Tröskelvärde och åtgärder"
}
}
},
"cameraWizard": {
@ -913,15 +886,10 @@
"nameExists": "Kameranamnet finns redan",
"brands": {
"reolink-rtsp": "Reolink RTSP rekommenderas inte. Aktivera HTTP i kamerans firmwareinställningar och starta om guiden."
},
"customUrlRtspRequired": "Anpassade webbadresser måste börja med \"rtsp://\". Manuell konfiguration krävs för kameraströmmar som inte använder RTSP."
}
},
"docs": {
"reolink": "https://docs.frigate.video/configuration/camera_specific.html#reolink-cameras"
},
"testing": {
"probingMetadata": "Undersöker kamerans metadata...",
"fetchingSnapshot": "Hämtar kamerabild..."
}
},
"step2": {
@ -1000,9 +968,7 @@
},
"hikvision": {
"substreamWarning": "Delström 1 är låst till en låg upplösning. Många Hikvision kameror stöder ytterligare delströmmar som måste aktiveras i kamerans inställningar. Det rekommenderas att kontrollera och använda dessa strömmar om de är tillgängliga."
},
"resolutionHigh": "En upplösning på {{resolution}} kan orsaka ökad resursanvändning.",
"resolutionLow": "En upplösning på {{resolution}} kan vara för låg för tillförlitlig detektering av små objekt."
}
}
}
},

View File

@ -288,8 +288,7 @@
},
"list": {
"two": "{{0}} і {{1}}",
"many": "{{items}}, і {{last}}",
"separatorWithSpace": ", "
"many": "{{items}}, і {{last}}"
},
"field": {
"optional": "Необов'язково",

View File

@ -5,9 +5,7 @@
"renameCategory": "Перейменувати клас",
"deleteCategory": "Видалити клас",
"deleteImages": "Видалити зображення",
"trainModel": "Модель поїзда",
"addClassification": "Додати класифікацію",
"deleteModels": "Видалити моделі"
"trainModel": "Модель поїзда"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "Видалені зображення",
"categorizedImage": "Зображення успішно класифіковано",
"trainedModel": "Успішно навчена модель.",
"trainingModel": "Успішно розпочато навчання моделі.",
"deletedModel": "Успішно видалено {{count}} моделей"
"trainingModel": "Успішно розпочато навчання моделі."
},
"error": {
"deleteImageFailed": "Не вдалося видалити: {{errorMessage}}",
"deleteCategoryFailed": "Не вдалося видалити клас: {{errorMessage}}",
"categorizeFailed": "Не вдалося класифікувати зображення: {{errorMessage}}",
"trainingFailed": "Не вдалося розпочати навчання моделі: {{errorMessage}}",
"deleteModelFailed": "Не вдалося видалити модель: {{errorMessage}}"
"trainingFailed": "Не вдалося розпочати навчання моделі: {{errorMessage}}"
}
},
"deleteCategory": {
@ -137,14 +133,5 @@
},
"generateSuccess": "Зразки зображень успішно створено"
}
},
"deleteModel": {
"title": "Видалити модель класифікації",
"single": "Ви впевнені, що хочете видалити {{name}}? Це назавжди видалить усі пов’язані дані, включаючи зображення та дані навчання. Цю дію не можна скасувати.",
"desc": "Ви впевнені, що хочете видалити {{count}} модель(і)? Це назавжди видалить усі пов’язані дані, включаючи зображення та навчальні дані. Цю дію не можна скасувати."
},
"menu": {
"objects": "Об'єкти",
"states": "Стани"
}
}

View File

@ -226,8 +226,7 @@
"details": "деталі",
"snapshot": "знімок",
"video": "відео",
"object_lifecycle": "життєвий цикл об'єкта",
"thumbnail": "мініатюра"
"object_lifecycle": "життєвий цикл об'єкта"
},
"exploreMore": "Дослідіть більше об'єктів {{label}}",
"aiAnalysis": {
@ -241,7 +240,7 @@
"noImageFound": "Для цієї позначки часу не знайдено зображення.",
"createObjectMask": "Створити маску об'єкта",
"adjustAnnotationSettings": "Налаштування параметрів анотацій",
"scrollViewTips": "Натисніть, щоб переглянути важливі моменти життєвого циклу цього об'єкта.",
"scrollViewTips": "Прокрутіть, щоб переглянути важливі моменти життєвого циклу цього об'єкта.",
"autoTrackingTips": "Положення обмежувальних рамок будуть неточними для камер з автоматичним відстеженням.",
"count": "{{first}} з {{second}}",
"trackedPoint": "Відстежувана точка",

View File

@ -1077,9 +1077,7 @@
},
"resolutionHigh": "Роздільна здатність {{resolution}} може призвести до збільшення використання ресурсів.",
"resolutionLow": "Роздільна здатність {{resolution}} може бути занадто низькою для надійного виявлення малих об'єктів."
},
"ffmpegModule": "Використовувати режим сумісності з потоками",
"ffmpegModuleDescription": "Якщо потік не завантажується після кількох спроб, спробуйте ввімкнути цю функцію. Коли вона ввімкнена, Frigate використовуватиме модуль ffmpeg з go2rtc. Це може забезпечити кращу сумісність з деякими потоками камер."
}
}
},
"cameraManagement": {

View File

@ -10,7 +10,6 @@
"loginFailed": "Đăng nhập không thành công",
"unknownError": "Lỗi không xác định. Kiểm tra nhật ký.",
"webUnknownError": "Lỗi không xác định. Kiểm tra nhật ký bảng điều khiển."
},
"firstTimeLogin": "Lần đầu đăng nhập? Thông tin đăng nhập được in trong nhật ký (log) của Frigate."
}
}
}

View File

@ -1,20 +1 @@
{
"documentTitle": "Mô Hình Phân Loại",
"button": {
"deleteClassificationAttempts": "Xóa Hình Ảnh Phân Loại",
"renameCategory": "Đổi Tên Lớp",
"deleteCategory": "Xoá Lớp",
"deleteImages": "Xoá Hình Ảnh",
"trainModel": "Huấn Luyện Mô Hình",
"addClassification": "Thêm Phân Loại",
"deleteModels": "Xoá Mô Hình"
},
"toast": {
"success": {
"deletedCategory": "Lớp Đã Bị Xoá",
"deletedImage": "Hình ảnh đã bị xóa",
"deletedModel": "Đã xóa thành công {{count}} mô hình",
"categorizedImage": "Phân Loại Hình Ảnh Thành Công"
}
}
}
{}

View File

@ -36,6 +36,5 @@
"markAsReviewed": "Đánh dấu là đã xem xét",
"markTheseItemsAsReviewed": "Đánh dấu các mục này là đã xem xét",
"suspiciousActivity": "Hoạt động đáng ngờ",
"threateningActivity": "Hoạt động đe dọa",
"zoomIn": "Phóng To"
"threateningActivity": "Hoạt động đe dọa"
}

View File

@ -13,10 +13,5 @@
"error": {
"renameExportFailed": "Đổi tên tệp xuất thất bại: {{errorMessage}}"
}
},
"tooltip": {
"shareExport": "Chia sẻ bản xuất",
"downloadVideo": "Tải video",
"editName": "Chỉnh sửa tên"
}
}

View File

@ -9,9 +9,7 @@
"object": "Gỡ lỗi - Frigate",
"general": "Cài đặt Chung - Frigate",
"frigatePlus": "Cài đặt Frigate+ - Frigate",
"motionTuner": "Bộ tinh chỉnh Chuyển động - Frigate",
"cameraManagement": "Quản Lý Camera - Frigate",
"cameraReview": "Cài Đặt Xem Lại Camera - Frigate"
"motionTuner": "Bộ tinh chỉnh Chuyển động - Frigate"
},
"notification": {
"toast": {

View File

@ -283,8 +283,7 @@
},
"list": {
"two": "{{0}} 和 {{1}}",
"many": "{{items}} 以及 {{last}}",
"separatorWithSpace": " "
"many": "{{items}} 以及 {{last}}"
},
"field": {
"optional": "可选",

View File

@ -34,8 +34,7 @@
"details": "详情",
"snapshot": "快照",
"video": "视频",
"object_lifecycle": "目标全周期",
"thumbnail": "缩略图"
"object_lifecycle": "目标全周期"
},
"objectLifecycle": {
"title": "目标全周期",
@ -237,7 +236,7 @@
"noImageFound": "在该时间内没找到图片。",
"createObjectMask": "创建目标遮罩",
"adjustAnnotationSettings": "调整注释设置",
"scrollViewTips": "点击以查看该目标全周期中的关键时刻。",
"scrollViewTips": "滚动以查看该目标全周期中的关键时刻。",
"autoTrackingTips": "自动追踪摄像头的边框定位可能不准确。",
"count": "{{first}} / {{second}}",
"trackedPoint": "追踪点",

View File

@ -5,9 +5,7 @@
"renameCategory": "重命名类别",
"deleteCategory": "删除类别",
"deleteImages": "删除图片",
"trainModel": "训练模型",
"addClassification": "添加分类",
"deleteModels": "删除模型"
"trainModel": "训练模型"
},
"toast": {
"success": {
@ -15,15 +13,13 @@
"deletedImage": "删除图片",
"categorizedImage": "成功分类图片",
"trainedModel": "训练模型成功。",
"trainingModel": "已开始训练模型。",
"deletedModel": "已删除 {{count}} 个模型"
"trainingModel": "已开始训练模型。"
},
"error": {
"deleteImageFailed": "删除失败:{{errorMessage}}",
"deleteCategoryFailed": "删除类别失败:{{errorMessage}}",
"categorizeFailed": "图片分类失败:{{errorMessage}}",
"trainingFailed": "开始训练模型失败:{{errorMessage}}",
"deleteModelFailed": "删除模型失败:{{errorMessage}}"
"trainingFailed": "开始训练模型失败:{{errorMessage}}"
}
},
"deleteCategory": {
@ -103,8 +99,7 @@
"stateRequiresTwoClasses": "状态模型至少需要两个类别",
"objectLabelRequired": "请选择一个目标标签",
"objectTypeRequired": "请选择一个目标标签"
},
"states": "状态"
}
},
"step2": {
"description": "选择摄像头,并为摄像头定义要监控的区域。模型将对这些区域的状态进行分类。",
@ -137,14 +132,5 @@
},
"generateSuccess": "样本图片生成成功"
}
},
"deleteModel": {
"title": "删除分类模型",
"single": "你确定要删除 {{name}} 吗?此操作将永久删除所有相关数据,包括图片和训练数据,且无法撤销。",
"desc": "你确定要删除 {{count}} 个模型吗?此操作将永久删除所有相关数据,包括图片和训练数据,且无法撤销。"
},
"menu": {
"objects": "目标",
"states": "状态"
}
}

View File

@ -181,7 +181,6 @@ type GroupedClassificationCardProps = {
selectedItems: string[];
i18nLibrary: string;
objectType: string;
noClassificationLabel?: string;
onClick: (data: ClassificationItemData | undefined) => void;
children?: (data: ClassificationItemData) => React.ReactNode;
};
@ -191,7 +190,6 @@ export function GroupedClassificationCard({
threshold,
selectedItems,
i18nLibrary,
noClassificationLabel = "details.none",
onClick,
children,
}: GroupedClassificationCardProps) {
@ -224,14 +222,10 @@ export function GroupedClassificationCard({
const bestTyped: ClassificationItemData = best;
return {
...bestTyped,
name: event
? event.sub_label && event.sub_label !== "none"
? event.sub_label
: t(noClassificationLabel)
: bestTyped.name,
name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name,
score: event?.data?.sub_label_score || bestTyped.score,
};
}, [group, event, noClassificationLabel, t]);
}, [group, event, t]);
const bestScoreStatus = useMemo(() => {
if (!bestItem?.score || !threshold) {
@ -317,10 +311,8 @@ export function GroupedClassificationCard({
isMobile && "px-2",
)}
>
{event?.sub_label && event.sub_label !== "none"
? event.sub_label
: t(noClassificationLabel)}
{event?.sub_label && event.sub_label !== "none" && (
{event?.sub_label ? event.sub_label : t("details.unknown")}
{event?.sub_label && (
<div
className={cn(
"",

View File

@ -317,21 +317,6 @@ export default function Step3ChooseExamples({
return unclassifiedImages.length === 0;
}, [unclassifiedImages]);
const handleBack = useCallback(() => {
if (currentClassIndex > 0) {
const previousClass = allClasses[currentClassIndex - 1];
setCurrentClassIndex((prev) => prev - 1);
// Restore selections for the previous class
const previousSelections = Object.entries(imageClassifications)
.filter(([_, className]) => className === previousClass)
.map(([imageName, _]) => imageName);
setSelectedImages(new Set(previousSelections));
} else {
onBack();
}
}, [currentClassIndex, allClasses, imageClassifications, onBack]);
return (
<div className="flex flex-col gap-6">
{isTraining ? (
@ -435,7 +420,7 @@ export default function Step3ChooseExamples({
{!isTraining && (
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
<Button type="button" onClick={handleBack} className="sm:flex-1">
<Button type="button" onClick={onBack} className="sm:flex-1">
{t("button.back", { ns: "common" })}
</Button>
<Button

View File

@ -348,26 +348,6 @@ export function GeneralFilterContent({
onClose,
}: GeneralFilterContentProps) {
const { t } = useTranslation(["components/filter"]);
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const allAudioListenLabels = useMemo<string[]>(() => {
if (!config) {
return [];
}
const labels = new Set<string>();
Object.values(config.cameras).forEach((camera) => {
if (camera?.audio?.enabled) {
camera.audio.listen.forEach((label) => {
labels.add(label);
});
}
});
return [...labels].sort();
}, [config]);
return (
<>
<div className="overflow-x-hidden">
@ -393,10 +373,7 @@ export function GeneralFilterContent({
{allLabels.map((item) => (
<FilterSwitch
key={item}
label={getTranslatedLabel(
item,
allAudioListenLabels.includes(item) ? "audio" : "object",
)}
label={getTranslatedLabel(item)}
isChecked={currentLabels?.includes(item) ?? false}
onCheckedChange={(isChecked) => {
if (isChecked) {

View File

@ -58,47 +58,6 @@ export default function ObjectTrackOverlay({
const effectiveCurrentTime = currentTime - annotationOffset / 1000;
const {
pathStroke,
pointRadius,
pointStroke,
zoneStroke,
boxStroke,
highlightRadius,
} = useMemo(() => {
const BASE_WIDTH = 1280;
const BASE_HEIGHT = 720;
const BASE_PATH_STROKE = 5;
const BASE_POINT_RADIUS = 7;
const BASE_POINT_STROKE = 3;
const BASE_ZONE_STROKE = 5;
const BASE_BOX_STROKE = 5;
const BASE_HIGHLIGHT_RADIUS = 5;
const scale = Math.sqrt(
(videoWidth * videoHeight) / (BASE_WIDTH * BASE_HEIGHT),
);
const pathStroke = Math.max(1, Math.round(BASE_PATH_STROKE * scale));
const pointRadius = Math.max(2, Math.round(BASE_POINT_RADIUS * scale));
const pointStroke = Math.max(1, Math.round(BASE_POINT_STROKE * scale));
const zoneStroke = Math.max(1, Math.round(BASE_ZONE_STROKE * scale));
const boxStroke = Math.max(1, Math.round(BASE_BOX_STROKE * scale));
const highlightRadius = Math.max(
2,
Math.round(BASE_HIGHLIGHT_RADIUS * scale),
);
return {
pathStroke,
pointRadius,
pointStroke,
zoneStroke,
boxStroke,
highlightRadius,
};
}, [videoWidth, videoHeight]);
// Fetch all event data in a single request (CSV ids)
const { data: eventsData } = useSWR<Event[]>(
selectedObjectIds.length > 0
@ -239,21 +198,16 @@ export default function ObjectTrackOverlay({
b.timestamp - a.timestamp,
)[0]?.data?.zones || [];
// bounding box - only show if there's a timeline event at/near the current time with a box
// Search all timeline events (not just those before current time) to find one matching the seek position
const nearbyTimelineEvent = timelineData
?.filter((event: TrackingDetailsSequence) => event.data.box)
.sort(
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
Math.abs(a.timestamp - effectiveCurrentTime) -
Math.abs(b.timestamp - effectiveCurrentTime),
)
.find(
(event: TrackingDetailsSequence) =>
Math.abs(event.timestamp - effectiveCurrentTime) <= TOLERANCE,
);
const currentBox = nearbyTimelineEvent?.data?.box;
// bounding box (with tolerance for browsers with seek precision by-design issues)
const boxCandidates = timelineData?.filter(
(event: TrackingDetailsSequence) =>
event.timestamp <= effectiveCurrentTime + TOLERANCE &&
event.data.box,
);
const currentBox = boxCandidates?.sort(
(a: TrackingDetailsSequence, b: TrackingDetailsSequence) =>
b.timestamp - a.timestamp,
)[0]?.data?.box;
return {
objectId,
@ -379,7 +333,7 @@ export default function ObjectTrackOverlay({
points={zone.points}
fill={zone.fill}
stroke={zone.stroke}
strokeWidth={zoneStroke}
strokeWidth="5"
opacity="0.7"
/>
))}
@ -399,7 +353,7 @@ export default function ObjectTrackOverlay({
d={generateStraightPath(absolutePositions)}
fill="none"
stroke={objData.color}
strokeWidth={pathStroke}
strokeWidth="5"
strokeLinecap="round"
strokeLinejoin="round"
/>
@ -411,13 +365,13 @@ export default function ObjectTrackOverlay({
<circle
cx={pos.x}
cy={pos.y}
r={pointRadius}
r="7"
fill={getPointColor(
objData.color,
pos.lifecycle_item?.class_type,
)}
stroke="white"
strokeWidth={pointStroke}
strokeWidth="3"
style={{ cursor: onSeekToTime ? "pointer" : "default" }}
onClick={() => handlePointClick(pos.timestamp)}
/>
@ -446,7 +400,7 @@ export default function ObjectTrackOverlay({
height={objData.currentBox[3] * videoHeight}
fill="none"
stroke={objData.color}
strokeWidth={boxStroke}
strokeWidth="5"
opacity="0.9"
/>
<circle
@ -458,10 +412,10 @@ export default function ObjectTrackOverlay({
(objData.currentBox[1] + objData.currentBox[3]) *
videoHeight
}
r={highlightRadius}
r="5"
fill="rgb(255, 255, 0)" // yellow highlight
stroke={objData.color}
strokeWidth={boxStroke}
strokeWidth="5"
opacity="1"
/>
</g>

View File

@ -8,7 +8,7 @@ import Heading from "@/components/ui/heading";
import { FrigateConfig } from "@/types/frigateConfig";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { getIconForLabel } from "@/utils/iconUtil";
import { LuCircle, LuFolderX, LuSettings } from "react-icons/lu";
import { LuCircle, LuSettings } from "react-icons/lu";
import { cn } from "@/lib/utils";
import {
Tooltip,
@ -37,12 +37,9 @@ import { HiDotsHorizontal } from "react-icons/hi";
import axios from "axios";
import { toast } from "sonner";
import { useDetailStream } from "@/context/detail-stream-context";
import { isDesktop, isIOS, isMobileOnly, isSafari } from "react-device-detect";
import { isDesktop, isIOS } from "react-device-detect";
import Chip from "@/components/indicators/Chip";
import { FaDownload, FaHistory } from "react-icons/fa";
import { useApiHost } from "@/api";
import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
import ObjectTrackOverlay from "../ObjectTrackOverlay";
type TrackingDetailsProps = {
className?: string;
@ -59,19 +56,9 @@ export function TrackingDetails({
const videoRef = useRef<HTMLVideoElement | null>(null);
const { t } = useTranslation(["views/explore"]);
const navigate = useNavigate();
const apiHost = useApiHost();
const imgRef = useRef<HTMLImageElement | null>(null);
const [imgLoaded, setImgLoaded] = useState(false);
const [displaySource, _setDisplaySource] = useState<"video" | "image">(
"video",
);
const { setSelectedObjectIds, annotationOffset, setAnnotationOffset } =
useDetailStream();
// manualOverride holds a record-stream timestamp explicitly chosen by the
// user (eg, clicking a lifecycle row). When null we display `currentTime`.
const [manualOverride, setManualOverride] = useState<number | null>(null);
// event.start_time is detect time, convert to record, then subtract padding
const [currentTime, setCurrentTime] = useState(
(event.start_time ?? 0) + annotationOffset / 1000 - REVIEW_PADDING,
@ -86,13 +73,9 @@ export function TrackingDetails({
const { data: config } = useSWR<FrigateConfig>("config");
// Use manualOverride (set when seeking in image mode) if present so
// lifecycle rows and overlays follow image-mode seeks. Otherwise fall
// back to currentTime used for video mode.
const effectiveTime = useMemo(() => {
const displayedRecordTime = manualOverride ?? currentTime;
return displayedRecordTime - annotationOffset / 1000;
}, [manualOverride, currentTime, annotationOffset]);
return currentTime - annotationOffset / 1000;
}, [currentTime, annotationOffset]);
const containerRef = useRef<HTMLDivElement | null>(null);
const [_selectedZone, setSelectedZone] = useState("");
@ -135,30 +118,20 @@ export function TrackingDetails({
const handleLifecycleClick = useCallback(
(item: TrackingDetailsSequence) => {
if (!videoRef.current && !imgRef.current) return;
if (!videoRef.current) return;
// Convert lifecycle timestamp (detect stream) to record stream time
const targetTimeRecord = item.timestamp + annotationOffset / 1000;
if (displaySource === "image") {
// For image mode: set a manual override timestamp and update
// currentTime so overlays render correctly.
setManualOverride(targetTimeRecord);
setCurrentTime(targetTimeRecord);
return;
}
// For video mode: convert to video-relative time and seek player
// Convert to video-relative time for seeking
const eventStartRecord =
(event.start_time ?? 0) + annotationOffset / 1000;
const videoStartTime = eventStartRecord - REVIEW_PADDING;
const relativeTime = targetTimeRecord - videoStartTime;
if (videoRef.current) {
videoRef.current.currentTime = relativeTime;
}
videoRef.current.currentTime = relativeTime;
},
[event.start_time, annotationOffset, displaySource],
[event.start_time, annotationOffset],
);
const formattedStart = config
@ -199,20 +172,11 @@ export function TrackingDetails({
}, [eventSequence]);
useEffect(() => {
if (seekToTimestamp === null) return;
if (displaySource === "image") {
// For image mode, set the manual override so the snapshot updates to
// the exact record timestamp.
setManualOverride(seekToTimestamp);
setSeekToTimestamp(null);
return;
}
if (seekToTimestamp === null || !videoRef.current) return;
// seekToTimestamp is a record stream timestamp
// event.start_time is detect stream time, convert to record
// The video clip starts at (eventStartRecord - REVIEW_PADDING)
if (!videoRef.current) return;
const eventStartRecord = event.start_time + annotationOffset / 1000;
const videoStartTime = eventStartRecord - REVIEW_PADDING;
const relativeTime = seekToTimestamp - videoStartTime;
@ -220,14 +184,7 @@ export function TrackingDetails({
videoRef.current.currentTime = relativeTime;
}
setSeekToTimestamp(null);
}, [
seekToTimestamp,
event.start_time,
annotationOffset,
apiHost,
event.camera,
displaySource,
]);
}, [seekToTimestamp, event.start_time, annotationOffset]);
const isWithinEventRange =
effectiveTime !== undefined &&
@ -330,27 +287,6 @@ export function TrackingDetails({
[event.start_time, annotationOffset],
);
const [src, setSrc] = useState(
`${apiHost}api/${event.camera}/recordings/${currentTime + REVIEW_PADDING}/snapshot.jpg?height=500`,
);
const [hasError, setHasError] = useState(false);
// Derive the record timestamp to display: manualOverride if present,
// otherwise use currentTime.
const displayedRecordTime = manualOverride ?? currentTime;
useEffect(() => {
if (displayedRecordTime) {
const newSrc = `${apiHost}api/${event.camera}/recordings/${displayedRecordTime}/snapshot.jpg?height=500`;
setSrc(newSrc);
}
setImgLoaded(false);
setHasError(false);
// we know that these deps are correct
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [displayedRecordTime]);
if (!config) {
return <ActivityIndicator />;
}
@ -368,10 +304,9 @@ export function TrackingDetails({
<div
className={cn(
"flex items-center justify-center",
"flex w-full items-center justify-center",
isDesktop && "overflow-hidden",
cameraAspect === "tall" ? "max-h-[50dvh] lg:max-h-[70dvh]" : "w-full",
cameraAspect === "tall" && isMobileOnly && "w-full",
cameraAspect !== "tall" && isDesktop && "flex-[3]",
)}
style={{ aspectRatio: aspectRatio }}
@ -383,75 +318,21 @@ export function TrackingDetails({
cameraAspect === "tall" ? "h-full" : "w-full",
)}
>
{displaySource == "video" && (
<HlsVideoPlayer
videoRef={videoRef}
containerRef={containerRef}
visible={true}
currentSource={videoSource}
hotKeys={false}
supportsFullscreen={false}
fullscreen={false}
frigateControls={true}
onTimeUpdate={handleTimeUpdate}
onSeekToTime={handleSeekToTime}
isDetailMode={true}
camera={event.camera}
currentTimeOverride={currentTime}
/>
)}
{displaySource == "image" && (
<>
<ImageLoadingIndicator
className="absolute inset-0"
imgLoaded={imgLoaded}
/>
{hasError && (
<div className="relative aspect-video">
<div className="flex flex-col items-center justify-center p-20 text-center">
<LuFolderX className="size-16" />
{t("objectLifecycle.noImageFound")}
</div>
</div>
)}
<div
className={cn("relative", imgLoaded ? "visible" : "invisible")}
>
<div className="absolute z-50 size-full">
<ObjectTrackOverlay
key={`overlay-${displayedRecordTime}`}
camera={event.camera}
showBoundingBoxes={true}
currentTime={displayedRecordTime}
videoWidth={imgRef?.current?.naturalWidth ?? 0}
videoHeight={imgRef?.current?.naturalHeight ?? 0}
className="absolute inset-0 z-10"
onSeekToTime={handleSeekToTime}
/>
</div>
<img
key={event.id}
ref={imgRef}
className={cn(
"max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
)}
loading={isSafari ? "eager" : "lazy"}
style={
isIOS
? {
WebkitUserSelect: "none",
WebkitTouchCallout: "none",
}
: undefined
}
draggable={false}
src={src}
onLoad={() => setImgLoaded(true)}
onError={() => setHasError(true)}
/>
</div>
</>
)}
<HlsVideoPlayer
videoRef={videoRef}
containerRef={containerRef}
visible={true}
currentSource={videoSource}
hotKeys={false}
supportsFullscreen={false}
fullscreen={false}
frigateControls={true}
onTimeUpdate={handleTimeUpdate}
onSeekToTime={handleSeekToTime}
isDetailMode={true}
camera={event.camera}
currentTimeOverride={currentTime}
/>
<div
className={cn(
"absolute top-2 z-[5] flex items-center gap-2",

View File

@ -174,7 +174,9 @@ export default function CameraWizardDialog({
...(friendlyName && { friendly_name: friendlyName }),
ffmpeg: {
inputs: wizardData.streams.map((stream, index) => {
if (stream.restream) {
const isRestreamed =
wizardData.restreamIds?.includes(stream.id) ?? false;
if (isRestreamed) {
const go2rtcStreamName =
wizardData.streams!.length === 1
? finalCameraName
@ -232,11 +234,7 @@ export default function CameraWizardDialog({
wizardData.streams!.length === 1
? finalCameraName
: `${finalCameraName}_${index + 1}`;
const streamUrl = stream.useFfmpeg
? `ffmpeg:${stream.url}`
: stream.url;
go2rtcStreams[streamName] = [streamUrl];
go2rtcStreams[streamName] = [stream.url];
});
if (Object.keys(go2rtcStreams).length > 0) {

View File

@ -608,12 +608,6 @@ export default function Step1NameCamera({
</div>
)}
{isTesting && (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<ActivityIndicator className="size-4" />
{testStatus}
</div>
)}
<div className="flex flex-col gap-3 pt-3 sm:flex-row sm:justify-end sm:gap-4">
<Button
type="button"
@ -641,7 +635,10 @@ export default function Step1NameCamera({
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
>
{t("cameraWizard.step1.testConnection")}
{isTesting && <ActivityIndicator className="size-4" />}
{isTesting && testStatus
? testStatus
: t("cameraWizard.step1.testConnection")}
</Button>
)}
</div>

View File

@ -201,12 +201,16 @@ export default function Step2StreamConfig({
const setRestream = useCallback(
(streamId: string) => {
const stream = streams.find((s) => s.id === streamId);
if (!stream) return;
updateStream(streamId, { restream: !stream.restream });
const currentIds = wizardData.restreamIds || [];
const isSelected = currentIds.includes(streamId);
const newIds = isSelected
? currentIds.filter((id) => id !== streamId)
: [...currentIds, streamId];
onUpdate({
restreamIds: newIds,
});
},
[streams, updateStream],
[wizardData.restreamIds, onUpdate],
);
const hasDetectRole = streams.some((s) => s.roles.includes("detect"));
@ -431,7 +435,9 @@ export default function Step2StreamConfig({
{t("cameraWizard.step2.go2rtc")}
</span>
<Switch
checked={stream.restream || false}
checked={(wizardData.restreamIds || []).includes(
stream.id,
)}
onCheckedChange={() => setRestream(stream.id)}
/>
</div>

View File

@ -1,13 +1,7 @@
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { Switch } from "@/components/ui/switch";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { useTranslation } from "react-i18next";
import { LuRotateCcw, LuInfo } from "react-icons/lu";
import { LuRotateCcw } from "react-icons/lu";
import { useState, useCallback, useMemo, useEffect } from "react";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import axios from "axios";
@ -222,6 +216,7 @@ export default function Step3Validation({
brandTemplate: wizardData.brandTemplate,
customUrl: wizardData.customUrl,
streams: wizardData.streams,
restreamIds: wizardData.restreamIds,
};
onSave(configData);
@ -327,51 +322,6 @@ export default function Step3Validation({
</div>
)}
{result?.success && (
<div className="mb-3 flex items-center justify-between">
<div className="flex items-center gap-2">
<span className="text-sm">
{t("cameraWizard.step3.ffmpegModule")}
</span>
<Popover>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="sm"
className="h-4 w-4 p-0"
>
<LuInfo className="size-3" />
</Button>
</PopoverTrigger>
<PopoverContent className="pointer-events-auto w-80 text-xs">
<div className="space-y-2">
<div className="font-medium">
{t("cameraWizard.step3.ffmpegModule")}
</div>
<div className="text-muted-foreground">
{t(
"cameraWizard.step3.ffmpegModuleDescription",
)}
</div>
</div>
</PopoverContent>
</Popover>
</div>
<Switch
checked={stream.useFfmpeg || false}
onCheckedChange={(checked) => {
onUpdate({
streams: streams.map((s) =>
s.id === stream.id
? { ...s, useFfmpeg: checked }
: s,
),
});
}}
/>
</div>
)}
<div className="mb-2 flex flex-col justify-between gap-1 md:flex-row md:items-center">
<span className="break-all text-sm text-muted-foreground">
{stream.url}
@ -541,7 +491,8 @@ function StreamIssues({
// Restreaming check
if (stream.roles.includes("record")) {
if (stream.restream) {
const restreamIds = wizardData.restreamIds || [];
if (restreamIds.includes(stream.id)) {
result.push({
type: "warning",
message: t("cameraWizard.step3.issues.restreamingWarning"),
@ -709,10 +660,9 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
useEffect(() => {
// Register stream with go2rtc
const streamUrl = stream.useFfmpeg ? `ffmpeg:${stream.url}` : stream.url;
axios
.put(`go2rtc/streams/${streamId}`, null, {
params: { src: streamUrl },
params: { src: stream.url },
})
.then(() => {
// Add small delay to allow go2rtc api to run and initialize the stream
@ -730,7 +680,7 @@ function StreamPreview({ stream, onBandwidthUpdate }: StreamPreviewProps) {
// do nothing on cleanup errors - go2rtc won't consume the streams
});
};
}, [stream.url, stream.useFfmpeg, streamId]);
}, [stream.url, streamId]);
const resolution = stream.testResult?.resolution;
let aspectRatio = "16/9";

View File

@ -845,7 +845,6 @@ function FaceAttemptGroup({
selectedItems={selectedFaces}
i18nLibrary="views/faceLibrary"
objectType="person"
noClassificationLabel="details.unknown"
onClick={(data) => {
if (data) {
onClickFaces([data.filename], true);

View File

@ -85,8 +85,6 @@ export type StreamConfig = {
quality?: string;
testResult?: TestResult;
userTested?: boolean;
useFfmpeg?: boolean;
restream?: boolean;
};
export type TestResult = {
@ -107,6 +105,7 @@ export type WizardFormData = {
brandTemplate?: CameraBrand;
customUrl?: string;
streams?: StreamConfig[];
restreamIds?: string[];
};
// API Response Types
@ -147,7 +146,6 @@ export type CameraConfigData = {
inputs: {
path: string;
roles: string[];
input_args?: string;
}[];
};
live?: {

View File

@ -13,8 +13,7 @@ function formatZonesList(zones: string[]): string {
});
}
const separatorWithSpace = t("list.separatorWithSpace", { ns: "common" });
const allButLast = zones.slice(0, -1).join(separatorWithSpace);
const allButLast = zones.slice(0, -1).join(", ");
return t("list.many", {
items: allButLast,
last: zones[zones.length - 1],

View File

@ -10,7 +10,7 @@ import {
CustomClassificationModelConfig,
FrigateConfig,
} from "@/types/frigateConfig";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useTranslation } from "react-i18next";
import { FaFolderPlus } from "react-icons/fa";
import { MdModelTraining } from "react-icons/md";
@ -21,6 +21,7 @@ import Heading from "@/components/ui/heading";
import { useOverlayState } from "@/hooks/use-overlay-state";
import axios from "axios";
import { toast } from "sonner";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import {
DropdownMenu,
DropdownMenuContent,
@ -211,44 +212,42 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
}>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
const bypassDialogRef = useRef(false);
useKeyboardListener(["Shift"], (_, modifiers) => {
bypassDialogRef.current = modifiers.shift;
return false;
});
const handleDelete = useCallback(async () => {
try {
await axios.delete(`classification/${config.name}`);
await axios.put("/config/set", {
requires_restart: 0,
update_topic: `config/classification/custom/${config.name}`,
config_data: {
classification: {
custom: {
[config.name]: "",
},
},
},
await axios
.delete(`classification/${config.name}`)
.then((resp) => {
if (resp.status == 200) {
toast.success(t("toast.success.deletedModel", { count: 1 }), {
position: "top-center",
});
onDelete();
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
position: "top-center",
});
});
toast.success(t("toast.success.deletedModel", { count: 1 }), {
position: "top-center",
});
onDelete();
} catch (err) {
const error = err as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(t("toast.error.deleteModelFailed", { errorMessage }), {
position: "top-center",
});
}
}, [config, onDelete, t]);
const handleDeleteClick = useCallback((e: React.MouseEvent) => {
e.stopPropagation();
setDeleteDialogOpen(true);
}, []);
const handleDeleteClick = useCallback(() => {
if (bypassDialogRef.current) {
handleDelete();
} else {
setDeleteDialogOpen(true);
}
}, [handleDelete]);
const coverImage = useMemo(() => {
if (!dataset) {
@ -305,7 +304,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
className="size-full"
src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
/>
<ImageShadowOverlay lowerClassName="h-[30%] z-0" />
<ImageShadowOverlay />
<div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
{config.name}
</div>
@ -316,13 +315,14 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
<FiMoreVertical className="size-5 text-white" />
</BlurredIconButton>
</DropdownMenuTrigger>
<DropdownMenuContent
align="end"
onClick={(e) => e.stopPropagation()}
>
<DropdownMenuContent align="end">
<DropdownMenuItem onClick={handleDeleteClick}>
<LuTrash2 className="mr-2 size-4" />
<span>{t("button.delete", { ns: "common" })}</span>
<span>
{bypassDialogRef.current
? t("button.deleteNow", { ns: "common" })
: t("button.delete", { ns: "common" })}
</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>

View File

@ -961,7 +961,6 @@ function ObjectTrainGrid({
selectedItems={selectedImages}
i18nLibrary="views/classificationModel"
objectType={model.object_config?.objects?.at(0) ?? "Object"}
noClassificationLabel="details.none"
onClick={(data) => {
if (data) {
onClickImages([data.filename], true);