diff --git a/doc/dev/reference/components/container.rst b/doc/dev/reference/components/container.rst new file mode 100644 index 000000000..697a196f0 --- /dev/null +++ b/doc/dev/reference/components/container.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: CC-BY-SA-4.0 + +Container +^^^^^^^^^ + +.. autoclass:: plinth.container.Container + :members: diff --git a/doc/dev/reference/components/index.rst b/doc/dev/reference/components/index.rst index a88d433de..be6419544 100644 --- a/doc/dev/reference/components/index.rst +++ b/doc/dev/reference/components/index.rst @@ -22,6 +22,7 @@ Components staticfiles backups coturn + container Base Classes ^^^^^^^^^^^^ diff --git a/plinth/container.py b/plinth/container.py new file mode 100644 index 000000000..1f16985b7 --- /dev/null +++ b/plinth/container.py @@ -0,0 +1,143 @@ +# SPDX-License-Identifier: AGPL-3.0-or-later +"""Component to manage a container using podman.""" + +import contextlib + +from django.utils.translation import gettext_noop + +from plinth import app, privileged +from plinth.daemon import diagnose_port_listening +from plinth.diagnostic_check import (DiagnosticCheck, + DiagnosticCheckParameters, Result) + + +class Container(app.LeaderComponent): + """Component to manage a podman container.""" + + def __init__(self, component_id: str, name: str, image_name: str, + volume_name: str, volume_path: str, + volumes: dict[str, str] | None = None, + env: dict[str, str] | None = None, + binds_to: list[str] | None = None, + devices: dict[str, str] | None = None, + listen_ports: list[tuple[int, str]] | None = None): + """Initialize a container component. + + `name` is a string which is the name of the container to create and + manage. A systemd service unit with the same name is also created. + + `image_name` is a string that represents the repository location from + which the container images must be pulled. + + `volume_name` is a string with the name of the storage volume to create for + the container to use. 
+ + `volume_path` is a string path on the host machine where the volume + files for the container are stored. + + `volumes` is a dictionary mapping each string path on the host to a + string path inside the container. These are bind mounts made available + inside the container. + + `env` is a dictionary of string key to string values that set the + environment variables for the processes inside the container to run in. + + `binds_to` is a list of systemd service units that the container's own + systemd service unit will add BindsTo= and After= dependencies on. + + `devices` is a dictionary mapping device paths on the host to device + paths inside the container. If any of the devices don't exist on + the host, they will not be added. + + `listen_ports` is a list of tuples containing port number and 'tcp4' or + 'tcp6' network types on which this container is expected to listen on + after starting the container. This information is used to run + diagnostic checks on the container. + """ + super().__init__(component_id) + self.name = name + self.image_name = image_name + self.volume_name = volume_name + self.volume_path = volume_path + self.volumes = volumes + self.env = env + self.binds_to = binds_to + self.devices = devices + self.listen_ports = listen_ports or [] + + def is_enabled(self): + """Return if the container is enabled.""" + return privileged.container_is_enabled(self.name) + + def enable(self): + """Run operations to enable and run the container.""" + super().enable() + privileged.container_enable(self.name) + + def disable(self): + """Run operations to disable and stop the container.""" + super().disable() + privileged.container_disable(self.name) + + def is_running(self): + """Return whether the container service is running.""" + return privileged.is_running(self.name) + + @contextlib.contextmanager + def ensure_running(self): + """Ensure a service is running and return to previous state.""" + from plinth.privileged import service as service_privileged 
+ starting_state = self.is_running() + if not starting_state: + service_privileged.enable(self.name) + + try: + yield starting_state + finally: + if not starting_state: + service_privileged.disable(self.name) + + def setup(self, old_version: int): + """Bring up and run the container.""" + # Determine whether app should be disabled after setup + should_disable = old_version and not self.is_enabled() + + privileged.container_setup(self.name, self.image_name, + self.volume_name, self.volume_path, + self.volumes, self.env, self.binds_to, + self.devices) + + if should_disable: + self.disable() + + def uninstall(self): + """Remove the container.""" + privileged.container_uninstall(self.name, self.image_name, + self.volume_name, self.volume_path) + + def diagnose(self) -> list[DiagnosticCheck]: + """Check if the container is running. + + See :py:meth:`plinth.app.Component.diagnose`. + """ + results = [] + results.append(self._diagnose_unit_is_running()) + for port in self.listen_ports: + results.append( + diagnose_port_listening(port[0], port[1], None, + self.component_id)) + + return results + + def _diagnose_unit_is_running(self) -> DiagnosticCheck: + """Check if a daemon is running.""" + check_id = f'container-running-{self.name}' + result = Result.PASSED if self.is_running() else Result.FAILED + + description = gettext_noop('Container {container_name} is running') + parameters: DiagnosticCheckParameters = { + 'container_name': str(self.name) + } + + return DiagnosticCheck(check_id, description, result, parameters, + self.component_id) diff --git a/plinth/privileged/__init__.py b/plinth/privileged/__init__.py index fa2c5f88d..f71d4b940 100644 --- a/plinth/privileged/__init__.py +++ b/plinth/privileged/__init__.py @@ -2,6 +2,9 @@ """Package holding all the privileged actions outside of apps.""" from .config import dropin_is_valid, dropin_link, dropin_unlink +from .container import (container_disable, container_enable, + container_is_enabled, container_setup, + 
container_uninstall) from .packages import (filter_conffile_packages, install, is_package_manager_busy, remove, update) from .service import (disable, enable, is_enabled, is_running, mask, reload, @@ -13,5 +16,6 @@ __all__ = [ 'update', 'systemd_set_default', 'disable', 'enable', 'is_enabled', 'is_running', 'mask', 'reload', 'restart', 'start', 'stop', 'try_reload_or_restart', 'try_restart', 'unmask', 'dropin_is_valid', - 'dropin_link', 'dropin_unlink' + 'dropin_link', 'dropin_unlink', 'container_disable', 'container_enable', + 'container_is_enabled', 'container_setup', 'container_uninstall' ] diff --git a/plinth/privileged/container.py b/plinth/privileged/container.py new file mode 100644 index 000000000..14a3513f9 --- /dev/null +++ b/plinth/privileged/container.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: AGPL-3.0-or-later +"""Handle container run using podman.""" + +from plinth import action_utils +from plinth import app as app_module +from plinth import module_loader +from plinth.actions import privileged + + +@privileged +def container_is_enabled(container: str) -> bool: + """Return whether a container is enabled.""" + _assert_container_is_managed(container) + return action_utils.podman_is_enabled(container) + + +@privileged +def container_enable(container: str): + """Enable a container so that it start on system boot.""" + _assert_container_is_managed(container) + action_utils.podman_enable(container) + action_utils.service_enable(container) + + +@privileged +def container_disable(container: str): + """Disable a container so that it does not start on system boot.""" + _assert_container_is_managed(container) + action_utils.service_disable(container) + action_utils.podman_disable(container) + + +@privileged +def container_setup(container: str, image_name: str, volume_name: str, + volume_path: str, volumes: dict[str, str] | None = None, + env: dict[str, str] | None = None, + binds_to: list[str] | None = None, + devices: dict[str, str] | None = None): + 
"""Remove and recreate the podman container.""" + _assert_container_is_managed(container) + action_utils.podman_create(container, image_name, volume_name, volume_path, + volumes, env, binds_to, devices) + action_utils.service_start(container, check=True) + + +@privileged +def container_uninstall(container: str, image_name: str, volume_name: str, + volume_path: str): + """Remove podman container.""" + action_utils.podman_uninstall(container_name=container, + image_name=image_name, + volume_name=volume_name, + volume_path=volume_path) + + +def _get_managed_containers() -> set[str]: + """Get a set of all containers managed by FreedomBox.""" + from plinth.container import Container + + containers = set() + module_loader.load_modules() + app_module.apps_init() + for app in app_module.App.list(): + components = app.get_components_of_type(Container) + for component in components: + containers.add(component.name) + + return containers + + +def _assert_container_is_managed(container_name): + """Check that container is managed by one of the FreedomBox apps.""" + managed_containers = _get_managed_containers() + if container_name not in managed_containers: + msg = ("The container '%s' is not managed by FreedomBox. 
Access is " + "only permitted for containers listed in the Container " + "components of any FreedomBox app.") % container_name + raise ValueError(msg) diff --git a/plinth/privileged/service.py b/plinth/privileged/service.py index c299c98ac..28bfa2f6a 100644 --- a/plinth/privileged/service.py +++ b/plinth/privileged/service.py @@ -106,6 +106,8 @@ def is_running(service: str) -> bool: def _get_managed_services(): """Get a set of all services managed by FreedomBox.""" + from plinth.container import Container + services = set() module_loader.load_modules() app_module.apps_init() @@ -120,6 +122,10 @@ def _get_managed_services(): for component in components: services.add(component.unit) + components = app.get_components_of_type(Container) + for component in components: + services.add(component.name) + return services diff --git a/plinth/tests/test_container.py b/plinth/tests/test_container.py new file mode 100644 index 000000000..46da46a3d --- /dev/null +++ b/plinth/tests/test_container.py @@ -0,0 +1,188 @@ +# SPDX-License-Identifier: AGPL-3.0-or-later +"""Test component to manage a container using podman.""" + +from unittest.mock import call, patch + +import pytest + +from plinth.app import App, Info +from plinth.container import Container +from plinth.diagnostic_check import DiagnosticCheck, Result + +pytestmark = pytest.mark.usefixtures('mock_privileged') +privileged_modules_to_mock = [ + 'plinth.privileged', 'plinth.privileged.container', + 'plinth.privileged.service' +] + + +class AppTest(App): + """Test application that contains a daemon.""" + + app_id = 'test-app' + + +@pytest.fixture(name='container') +def fixture_container(): + app1 = AppTest() + app1.add(Info('test-app', 1)) + container = Container('test-container', 'name1', 'image:stable', 'volume1', + '/volume', {'/host1': '/cont1'}, {'KEY1': 'VAL1'}, + ['service1.service'], {'/dev/host1': '/dev/cont1'}, + [(1234, 'tcp4')]) + app1.add(container) + with patch('plinth.app.App.list') as app_list: + 
app_list.return_value = [app1] + yield container + + +def test_container_init(container): + """Test initializing the container component.""" + component = Container('test-container', 'name1', 'image:stable', 'volume1', + '/volume') + assert component.component_id == 'test-container' + assert component.name == 'name1' + assert component.image_name == 'image:stable' + assert component.volume_name == 'volume1' + assert component.volume_path == '/volume' + assert component.volumes is None + assert component.env is None + assert component.binds_to is None + assert component.devices is None + assert component.listen_ports == [] + + assert container.component_id == 'test-container' + assert container.name == 'name1' + assert container.image_name == 'image:stable' + assert container.volume_name == 'volume1' + assert container.volume_path == '/volume' + assert container.volumes == {'/host1': '/cont1'} + assert container.env == {'KEY1': 'VAL1'} + assert container.binds_to == ['service1.service'] + assert container.devices == {'/dev/host1': '/dev/cont1'} + assert container.listen_ports == [(1234, 'tcp4')] + + +@patch('plinth.action_utils.podman_is_enabled') +def test_container_is_enabled(podman_is_enabled, container): + """Test checking if container is enabled.""" + podman_is_enabled.return_value = False + assert not container.is_enabled() + + podman_is_enabled.return_value = True + assert container.is_enabled() + + +@patch('plinth.action_utils.service_enable') +@patch('plinth.action_utils.podman_enable') +def test_container_enable(podman_enable, enable, container): + """Test enabling a container component.""" + container.enable() + assert podman_enable.mock_calls == [call('name1')] + assert enable.mock_calls == [call('name1')] + + +@patch('plinth.action_utils.service_disable') +@patch('plinth.action_utils.podman_disable') +def test_container_disable(podman_disable, disable, container): + """Test disabling a container component.""" + container.disable() + assert 
podman_disable.mock_calls == [call('name1')] + assert disable.mock_calls == [call('name1')] + + +@patch('plinth.action_utils.service_is_running') +def test_container_is_running(service_is_running, container): + """Test checking of container component is running.""" + service_is_running.return_value = False + assert not container.is_running() + assert service_is_running.mock_calls == [call('name1')] + + service_is_running.reset_mock() + service_is_running.return_value = True + assert container.is_running() + + +@patch('plinth.action_utils.service_disable') +@patch('plinth.action_utils.service_enable') +@patch('plinth.action_utils.service_is_running') +def test_container_ensure_running(service_is_running, enable, disable, + container): + """Test checking of container component can be ensured to be running.""" + service_is_running.return_value = True + with container.ensure_running() as state: + assert state + assert enable.mock_calls == [] + + assert disable.mock_calls == [] + + service_is_running.return_value = False + with container.ensure_running() as state: + assert not state + assert enable.mock_calls == [call('name1')] + + assert disable.mock_calls == [call('name1')] + + +@patch('plinth.action_utils.service_disable') +@patch('plinth.action_utils.service_start') +@patch('plinth.action_utils.podman_disable') +@patch('plinth.action_utils.podman_is_enabled') +@patch('plinth.action_utils.podman_create') +def test_container_setup(podman_create, is_enabled, disable, service_start, + service_disable, container): + """Test setting up the container.""" + is_enabled.return_value = True + container.setup(0) + assert podman_create.mock_calls == [ + call('name1', 'image:stable', 'volume1', '/volume', + {'/host1': '/cont1'}, {'KEY1': 'VAL1'}, ['service1.service'], + {'/dev/host1': '/dev/cont1'}) + ] + assert service_start.mock_calls == [call('name1', check=True)] + assert disable.mock_calls == [] + + is_enabled.return_value = False + container.setup(0) + assert 
disable.mock_calls == [] + + is_enabled.return_value = False + container.setup(1) + assert disable.mock_calls == [call('name1')] + assert service_disable.mock_calls == [call('name1')] + + +@patch('plinth.action_utils.podman_uninstall') +def test_container_uninstall(podman_uninstall, container): + """Test uninstalling the container.""" + container.uninstall() + assert podman_uninstall.mock_calls == [ + call(container_name='name1', image_name='image:stable', + volume_name='volume1', volume_path='/volume') + ] + + +@patch('plinth.action_utils.service_is_running') +@patch('plinth.container.diagnose_port_listening') +def test_container_diagnose(diagnose_port_listening, service_is_running, + container): + """Test diagnosing the container.""" + expected_results = [ + DiagnosticCheck('container-running-name1', + 'Container {container_name} is running', Result.PASSED, + {'container_name': 'name1'}, 'test-container'), + DiagnosticCheck('daemon-listening-tcp4-1234', + 'Listening on tcp4 port 1234', Result.PASSED, { + 'kind': 'tcp4', + 'port': 1234 + }, 'test-container'), + ] + diagnose_port_listening.return_value = expected_results[1] + service_is_running.return_value = True + results = container.diagnose() + assert results == expected_results + + service_is_running.return_value = False + expected_results[0].result = Result.FAILED + results = container.diagnose() + assert results == expected_results