From ef686455230f6dfc750ecbeb08e7153734c996ca Mon Sep 17 00:00:00 2001
From: Chirayu Rai
Date: Tue, 26 Aug 2025 15:23:42 -0700
Subject: [PATCH] Reverted changes to classification and audio

---
 frigate/config/classification.py | 26 --------------------------
 frigate/events/audio.py          | 18 ------------------
 2 files changed, 44 deletions(-)

diff --git a/frigate/config/classification.py b/frigate/config/classification.py
index cbfe58597..63e89421c 100644
--- a/frigate/config/classification.py
+++ b/frigate/config/classification.py
@@ -163,32 +163,6 @@ class CameraSemanticSearchConfig(FrigateBaseModel):
     model_config = ConfigDict(extra="forbid", protected_namespaces=())
 
 
-class TriggerConfig(FrigateBaseModel):
-    enabled: bool = Field(default=True, title="Enable this trigger")
-    type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger")
-    data: str = Field(title="Trigger content (text phrase or image ID)")
-    threshold: float = Field(
-        title="Confidence score required to run the trigger",
-        default=0.8,
-        gt=0.0,
-        le=1.0,
-    )
-    actions: List[TriggerAction] = Field(
-        default=[], title="Actions to perform when trigger is matched"
-    )
-
-    model_config = ConfigDict(extra="forbid", protected_namespaces=())
-
-
-class CameraSemanticSearchConfig(FrigateBaseModel):
-    triggers: Dict[str, TriggerConfig] = Field(
-        default={},
-        title="Trigger actions on tracked objects that match existing thumbnails or descriptions",
-    )
-
-    model_config = ConfigDict(extra="forbid", protected_namespaces=())
-
-
 class FaceRecognitionConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable face recognition.")
     model_size: str = Field(
diff --git a/frigate/events/audio.py b/frigate/events/audio.py
index d6fed826b..31b9a7f3c 100644
--- a/frigate/events/audio.py
+++ b/frigate/events/audio.py
@@ -180,24 +180,6 @@ class AudioEventMaintainer(threading.Thread):
         )
         self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio.value)
 
-        if self.camera_config.audio_transcription.enabled_in_config:
-            # init the transcription processor for this camera
-            self.transcription_processor = AudioTranscriptionRealTimeProcessor(
-                config=self.config,
-                camera_config=self.camera_config,
-                requestor=self.requestor,
-                model_runner=self.audio_transcription_model_runner,
-                metrics=self.camera_metrics[self.camera_config.name],
-                stop_event=self.stop_event,
-            )
-
-            self.transcription_thread = threading.Thread(
-                target=self.transcription_processor.run,
-                name=f"{self.camera_config.name}_transcription_processor",
-                daemon=True,
-            )
-            self.transcription_thread.start()
-
         if self.camera_config.audio_transcription.enabled_in_config:
             # init the transcription processor for this camera
             self.transcription_processor = AudioTranscriptionRealTimeProcessor(