diff --git a/rock/actions/sandbox/response.py b/rock/actions/sandbox/response.py index 3fda9ee2d..806eb3296 100644 --- a/rock/actions/sandbox/response.py +++ b/rock/actions/sandbox/response.py @@ -47,6 +47,8 @@ class SandboxStatusResponse(BaseModel): namespace: str | None = None cpus: float | None = None memory: str | None = None + limit_disk_rootfs: str | None = None + limit_disk_log: str | None = None class CommandResponse(BaseModel): diff --git a/rock/actions/sandbox/sandbox_info.py b/rock/actions/sandbox/sandbox_info.py index a6a28816a..92d691e17 100644 --- a/rock/actions/sandbox/sandbox_info.py +++ b/rock/actions/sandbox/sandbox_info.py @@ -21,6 +21,8 @@ class SandboxInfo(TypedDict, total=False): create_user_gray_flag: bool cpus: float memory: str + limit_disk_rootfs: str + limit_disk_log: str create_time: str start_time: str stop_time: str diff --git a/rock/admin/entrypoints/sandbox_api.py b/rock/admin/entrypoints/sandbox_api.py index 277cc39ea..93182d2cf 100644 --- a/rock/admin/entrypoints/sandbox_api.py +++ b/rock/admin/entrypoints/sandbox_api.py @@ -27,6 +27,8 @@ from rock.admin.proto.response import SandboxStartResponse from rock.common.constants import ( CPU_PREEMPT_SWITCH, + SANDBOX_LIMIT_DISK_LOG_KEY, + SANDBOX_LIMIT_DISK_ROOTFS_KEY, GET_STATUS_SWITCH, KATA_DIND_DISK_SIZE_KEY, KATA_RUNTIME_SWITCH, @@ -69,6 +71,29 @@ async def _apply_kata_disk_size(config: DockerDeploymentConfig) -> None: config.kata_disk_size = disk_size +async def _apply_disk_limits(config: DockerDeploymentConfig) -> None: + """Apply disk limits from RuntimeConfig (rock-xxx.yml), overridable by Nacos at runtime. + + Priority: Nacos > RuntimeConfig (rock-xxx.yml). None in both means no limit. 
+ """ + runtime = sandbox_manager.rock_config.runtime + nacos = sandbox_manager.rock_config.nacos_provider + + limit_disk_rootfs = runtime.sandbox_limit_disk_rootfs + limit_disk_log = runtime.sandbox_limit_disk_log + + if nacos is not None: + nacos_rootfs = await nacos.get_config_value(SANDBOX_LIMIT_DISK_ROOTFS_KEY) + if nacos_rootfs: + limit_disk_rootfs = nacos_rootfs + nacos_log = await nacos.get_config_value(SANDBOX_LIMIT_DISK_LOG_KEY) + if nacos_log: + limit_disk_log = nacos_log + + config.limit_disk_rootfs = limit_disk_rootfs + config.limit_disk_log = limit_disk_log + + async def _apply_cpu_preempt_switch(config: DockerDeploymentConfig) -> None: """Check nacos switch and enable CPU preemption on the config if the switch is on. @@ -89,6 +114,7 @@ async def start(request: SandboxStartRequest) -> RockResponse[SandboxStartRespon await _apply_kata_runtime_switch(config) await _apply_kata_disk_size(config) await _apply_cpu_preempt_switch(config) + await _apply_disk_limits(config) sandbox_start_response = await sandbox_manager.start(config) return RockResponse(result=sandbox_start_response) @@ -103,6 +129,7 @@ async def start_async( await _apply_kata_runtime_switch(config) await _apply_kata_disk_size(config) await _apply_cpu_preempt_switch(config) + await _apply_disk_limits(config) sandbox_start_response = await sandbox_manager.start_async( config, user_info=headers.user_info, diff --git a/rock/admin/proto/response.py b/rock/admin/proto/response.py index a59df09d5..5d5771385 100644 --- a/rock/admin/proto/response.py +++ b/rock/admin/proto/response.py @@ -11,6 +11,8 @@ class SandboxStartResponse(SandboxResponse): host_ip: str | None = None cpus: float | None = None memory: str | None = None + limit_disk_rootfs: str | None = None + limit_disk_log: str | None = None # TODO: inherit from SandboxStartResponse @@ -30,6 +32,8 @@ class SandboxStatusResponse(BaseModel): namespace: str | None = None cpus: float | None = None memory: str | None = None + limit_disk_rootfs: str | 
None = None + limit_disk_log: str | None = None @classmethod def from_sandbox_info(cls, sandbox_info: "SandboxInfo") -> "SandboxStatusResponse": @@ -46,6 +50,8 @@ def from_sandbox_info(cls, sandbox_info: "SandboxInfo") -> "SandboxStatusRespons namespace=sandbox_info.get("namespace"), cpus=sandbox_info.get("cpus"), memory=sandbox_info.get("memory"), + limit_disk_rootfs=sandbox_info.get("limit_disk_rootfs"), + limit_disk_log=sandbox_info.get("limit_disk_log"), ) diff --git a/rock/common/constants.py b/rock/common/constants.py index d7fa78fc5..fce3acfe8 100644 --- a/rock/common/constants.py +++ b/rock/common/constants.py @@ -5,6 +5,8 @@ SUPPORT_KATA_SWITCH = "support_kata_enabled" CPU_PREEMPT_SWITCH = "cpu_preempt_enabled" KATA_DIND_DISK_SIZE_KEY = "kata_dind_disk_size" +SANDBOX_LIMIT_DISK_ROOTFS_KEY = "sandbox_limit_disk_rootfs" +SANDBOX_LIMIT_DISK_LOG_KEY = "sandbox_limit_disk_log" PID_PREFIX = "PIDSTART" PID_SUFFIX = "PIDEND" SCHEDULER_LOG_NAME = "scheduler.log" diff --git a/rock/config.py b/rock/config.py index f43952732..e67777cae 100644 --- a/rock/config.py +++ b/rock/config.py @@ -159,6 +159,10 @@ class RuntimeConfig: use_standard_spec_only: bool = False metrics_endpoint: str = "" user_defined_tags: dict = field(default_factory=dict) + sandbox_limit_disk_rootfs: str | None = None + """Default rootfs quota per container. None means no limit. Can be overridden by nacos key 'default_limit_disk'.""" + sandbox_limit_disk_log: str | None = None + """Default log-dir quota per container. None means no limit. Can be overridden by nacos key 'default_log_dir_quota'.""" def __post_init__(self) -> None: # Convert dict to StandardSpec if needed diff --git a/rock/deployments/config.py b/rock/deployments/config.py index 57458b183..9288a20e8 100644 --- a/rock/deployments/config.py +++ b/rock/deployments/config.py @@ -93,6 +93,12 @@ class DockerDeploymentConfig(DeploymentConfig): limit_cpus: float | None = None """Hard limit on the number of CPU cores the container can use. 
Used as --cpus when CPU preemption is enabled via nacos switch.""" + limit_disk_rootfs: str | None = None + """Maximum rootfs disk size for the container (e.g., '20g', '50g'). Maps to --storage-opt size=. Only supported on overlay2 storage driver with xfs backing filesystem. None means no limit.""" + + limit_disk_log: str | None = None + """XFS project quota for the sandbox log directory. Server-side only, applied via xfs_quota. None means no limit.""" + container_name: str | None = None """Custom name for the container. If None, a random name will be generated.""" diff --git a/rock/deployments/docker.py b/rock/deployments/docker.py index 67246ddee..2c945eedd 100644 --- a/rock/deployments/docker.py +++ b/rock/deployments/docker.py @@ -1,5 +1,6 @@ import asyncio import datetime +import hashlib import os import random import shlex @@ -47,6 +48,7 @@ class DockerDeployment(AbstractDeployment): + def __init__( self, **kwargs: Any, @@ -57,6 +59,8 @@ def __init__( **kwargs: Keyword arguments (see `DockerDeploymentConfig` for details). """ self._config = DockerDeploymentConfig(**kwargs) + self._effective_limit_disk_rootfs: str | None = self._config.limit_disk_rootfs + self._effective_limit_disk_log: str | None = self._config.limit_disk_log self._runtime: RemoteSandboxRuntime | None = None self._container_process = None self._runtime_timeout = 0.15 @@ -350,11 +354,82 @@ def _cpus(self): return [f"--cpu-shares={cpu_shares}", f"--cpus={self.config.limit_cpus}"] return [f"--cpus={self.config.cpus}"] + def _storage_opts(self): + if self._effective_limit_disk_rootfs is not None: + return ["--storage-opt", f"size={self._effective_limit_disk_rootfs}"] + return [] + + def _try_set_log_dir_quota(self, log_file_path: str) -> None: + """Best-effort: set XFS project quota for sandbox log directory. + + Requires the log path to be on an XFS mount with prjquota/pquota enabled. + This check is independent of Docker's storage driver (no overlay2 requirement). 
+ """ + if self._effective_limit_disk_log is None: + return + + if not DockerUtil.is_xfs_prjquota_path(log_file_path): + logger.info(f"Log path {log_file_path!r} is not on XFS+prjquota, skipping quota setup") + self._effective_limit_disk_log = None + return + + # Derive a deterministic project id from container name; reserve low ids. + project_id = (int(hashlib.sha1(self.container_name.encode("utf-8")).hexdigest()[:8], 16) % 900000) + 100000 + try: + findmnt_result = subprocess.run( + ["findmnt", "-T", log_file_path, "-o", "TARGET", "--noheadings"], + capture_output=True, + text=True, + timeout=5, + ) + if findmnt_result.returncode != 0: + logger.warning(f"Failed to find mountpoint for log path {log_file_path!r}, skip quota setup") + self._effective_limit_disk_log = None + return + mount_point = findmnt_result.stdout.strip() + if not mount_point: + logger.warning(f"Empty mountpoint for log path {log_file_path!r}, skip quota setup") + self._effective_limit_disk_log = None + return + + set_project_cmd = f"project -s -p {shlex.quote(log_file_path)} {project_id}" + set_limit_cmd = f"limit -p bhard={self._effective_limit_disk_log} {project_id}" + for cmd in (set_project_cmd, set_limit_cmd): + result = subprocess.run( + ["xfs_quota", "-x", "-c", cmd, mount_point], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode != 0: + logger.warning( + f"xfs_quota failed for {log_file_path!r} with cmd={cmd!r}: {result.stderr.strip() or result.stdout.strip()}" + ) + self._effective_limit_disk_log = None + return + logger.info(f"Set XFS project quota {self._effective_limit_disk_log} for log path {log_file_path!r}") + except Exception as e: + logger.warning(f"Failed to set XFS project quota for {log_file_path!r}: {e}") + self._effective_limit_disk_log = None + async def start(self): """Starts the runtime.""" if not self.sandbox_validator.check_availability(): raise Exception("Docker is not available") + storage_opt_supported = 
DockerUtil.detect_storage_opt_support() + # Resolve effective rootfs quota: downgrade to None if storage-opt is not supported. + if self._config.limit_disk_rootfs is not None and not storage_opt_supported: + logger.warning( + f"[{self.config.container_name}] --storage-opt not supported on this worker " + f"(requires overlay2 + xfs + prjquota), ignoring limit_disk_rootfs={self._config.limit_disk_rootfs}" + ) + self._effective_limit_disk_rootfs = None + else: + self._effective_limit_disk_rootfs = self._config.limit_disk_rootfs + # Resolve effective log quota; _try_set_log_dir_quota will downgrade to None if XFS+prjquota is unavailable. + self._effective_limit_disk_log = self._config.limit_disk_log + if self._container_name is None: self.set_container_name(self._get_container_name()) self._service_status.set_sandbox_id(self._container_name) @@ -385,6 +460,7 @@ async def start(self): log_file_path = f"{env_vars.ROCK_LOGGING_PATH}/{self.container_name}" os.makedirs(log_file_path, exist_ok=True) os.chmod(log_file_path, 0o777) + self._try_set_log_dir_quota(log_file_path) volume_args.extend(["-v", f"{log_file_path}:{env_vars.ROCK_LOGGING_PATH}"]) env_arg = [ "-e", @@ -421,6 +497,7 @@ async def start(self): f"{self._service_status.get_mapped_port(Port.SSH)}:22", *self._memory(), *self._cpus(), + *self._storage_opts(), *platform_arg, *self._config.docker_args, "--name", @@ -554,6 +631,16 @@ def config(self) -> DockerDeploymentConfig: """Returns the config of the deployment.""" return self._config + @property + def effective_limit_disk_rootfs(self) -> str | None: + """Returns the actual rootfs quota in effect after runtime capability checks (may differ from config.limit_disk_rootfs).""" + return self._effective_limit_disk_rootfs + + @property + def effective_limit_disk_log(self) -> str | None: + """Returns the actual log-dir quota in effect after runtime capability checks (may differ from config.limit_disk_log).""" + return self._effective_limit_disk_log + async def 
_check_stop(self): logger.info(f"Start check container to stop: {self._container_name}") try: diff --git a/rock/sandbox/sandbox_actor.py b/rock/sandbox/sandbox_actor.py index a130d0072..527796ca8 100644 --- a/rock/sandbox/sandbox_actor.py +++ b/rock/sandbox/sandbox_actor.py @@ -128,6 +128,8 @@ async def start(self): logger.error(f"[{self._config.container_name}] start deployment failed: {ex}", exc_info=True) raise ex if isinstance(self._deployment, DockerDeployment): + self._config.limit_disk_rootfs = self._deployment.effective_limit_disk_rootfs + self._config.limit_disk_log = self._deployment.effective_limit_disk_log self._clean_container_background() await self._setup_monitor() @@ -274,5 +276,7 @@ async def sandbox_info(self) -> SandboxInfo: "namespace": await self.namespace(), "cpus": self._config.cpus, "memory": self._config.memory, + "limit_disk_rootfs": self._config.limit_disk_rootfs, + "limit_disk_log": self._config.limit_disk_log, } return {} diff --git a/rock/sandbox/sandbox_manager.py b/rock/sandbox/sandbox_manager.py index 3dbe2652d..56b4cd1c8 100644 --- a/rock/sandbox/sandbox_manager.py +++ b/rock/sandbox/sandbox_manager.py @@ -242,6 +242,8 @@ async def get_status(self, sandbox_id) -> SandboxStatusResponse: namespace=sandbox_info.get("namespace"), cpus=sandbox_info.get("cpus"), memory=sandbox_info.get("memory"), + limit_disk_rootfs=sandbox_info.get("limit_disk_rootfs"), + limit_disk_log=sandbox_info.get("limit_disk_log"), ) async def build_sandbox_info_from_redis(self, sandbox_id: str, deployment_info: SandboxInfo) -> SandboxInfo | None: @@ -376,3 +378,11 @@ def validate_sandbox_spec(self, runtime_config: RuntimeConfig, deployment_config except ValueError as e: logger.warning(f"Invalid memory size: {deployment_config.memory}", exc_info=e) raise BadRequestRockError(f"Invalid memory size: {deployment_config.memory}") + + # Validate limit_disk_rootfs format + if deployment_config.limit_disk_rootfs is not None: + try: + 
parse_size_to_bytes(deployment_config.limit_disk_rootfs) + except ValueError as e: + logger.warning(f"Invalid limit_disk_rootfs size: {deployment_config.limit_disk_rootfs}", exc_info=e) + raise BadRequestRockError(f"Invalid limit_disk_rootfs size: {deployment_config.limit_disk_rootfs}") diff --git a/rock/utils/docker.py b/rock/utils/docker.py index 6db0e747b..c9d9c6923 100644 --- a/rock/utils/docker.py +++ b/rock/utils/docker.py @@ -1,3 +1,4 @@ +import json import logging import subprocess @@ -7,6 +8,110 @@ class DockerUtil: """Docker operation utilities""" + @classmethod + def get_docker_info(cls) -> dict | None: + """Run 'docker info' and return the parsed JSON, or None on failure.""" + try: + result = subprocess.run( + ["docker", "info", "--format", "{{json .}}"], + capture_output=True, + text=True, + timeout=10, + ) + if result.returncode != 0: + logger.warning("get_docker_info: docker info failed") + return None + return json.loads(result.stdout) + except Exception as e: + logger.warning(f"get_docker_info: failed: {e}") + return None + + @classmethod + def get_docker_root_dir(cls) -> str | None: + """Return DockerRootDir from docker info, or None on failure.""" + info = cls.get_docker_info() + if info is None: + return None + root = info.get("DockerRootDir") + if not root: + logger.warning("get_docker_root_dir: DockerRootDir not found in docker info") + return root or None + + @classmethod + def is_xfs_prjquota_path(cls, path: str) -> bool: + """Return True if *path* is on an XFS mount with prjquota (or pquota) enabled. + + This is the prerequisite for XFS project quota on a directory. + Unlike detect_storage_opt_support(), this check is independent of Docker's + storage driver configuration. 
+ """ + try: + result = subprocess.run( + ["findmnt", "-T", path, "-o", "FSTYPE,OPTIONS", "--noheadings"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode != 0: + logger.info(f"is_xfs_prjquota_path: findmnt failed for {path!r}") + return False + line = result.stdout.strip() + if not line: + logger.info(f"is_xfs_prjquota_path: empty findmnt output for {path!r}") + return False + parts = line.split(None, 1) # split on first whitespace: FSTYPE OPTIONS + if len(parts) < 2: + logger.info(f"is_xfs_prjquota_path: unexpected findmnt output for {path!r}: {line!r}") + return False + fstype, options = parts[0], parts[1] + if fstype != "xfs": + logger.info(f"is_xfs_prjquota_path: {path!r} is on {fstype!r}, not xfs") + return False + opts = options.split(",") + has_prjquota = "prjquota" in opts or "pquota" in opts + if not has_prjquota: + logger.info( + f"is_xfs_prjquota_path: {path!r} is xfs but mount options {options!r} " + f"missing prjquota/pquota" + ) + return has_prjquota + except Exception as e: + logger.info(f"is_xfs_prjquota_path: findmnt command failed for {path!r}: {e}") + return False + + @classmethod + def detect_storage_opt_support(cls) -> bool: + """Detect whether --storage-opt size= is supported in this environment. + + Requirements: + - Docker storage driver is overlay2 + - Docker root directory is on an XFS mount with prjquota/pquota enabled + (checked via is_xfs_prjquota_path) + + Returns: + True if --storage-opt size= can be used, False otherwise. 
+ """ + info = cls.get_docker_info() + if info is None: + return False + + # Check 1: Driver must be overlay2 + if info.get("Driver") != "overlay2": + logger.info(f"detect_storage_opt_support: storage driver is {info.get('Driver')!r}, not overlay2") + return False + + # Check 2: DockerRootDir must be on XFS with prjquota/pquota + docker_root = info.get("DockerRootDir") + if not docker_root: + logger.warning("detect_storage_opt_support: DockerRootDir not found in docker info") + return False + + if not cls.is_xfs_prjquota_path(docker_root): + return False + + logger.info(f"detect_storage_opt_support: supported — overlay2, {docker_root!r} is xfs+prjquota") + return True + @classmethod def is_docker_available(cls): try: diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 40e69b6b3..3a8cf4c64 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -31,6 +31,18 @@ reason=f"Requires Docker and image {env_vars.ROCK_ENVHUB_DEFAULT_DOCKER_IMAGE}", ) +SKIP_IF_NO_STORAGE_OPT = pytest.mark.skipif( + not DockerUtil.detect_storage_opt_support(), + reason="Requires Docker with storage-opt support (overlay2 + xfs + prjquota/pquota)", +) + +_log_path = env_vars.ROCK_LOGGING_PATH or "" + +SKIP_IF_LOG_PATH_NOT_XFS = pytest.mark.skipif( + not _log_path or not DockerUtil.is_xfs_prjquota_path(_log_path), + reason=f"ROCK_LOGGING_PATH ({_log_path!r}) is not set or not on XFS with prjquota, skipping log quota test", +) + @dataclass class RemoteServer: diff --git a/tests/integration/sdk/sandbox/test_disk_limit.py b/tests/integration/sdk/sandbox/test_disk_limit.py new file mode 100644 index 000000000..c17e52b12 --- /dev/null +++ b/tests/integration/sdk/sandbox/test_disk_limit.py @@ -0,0 +1,231 @@ +"""Integration tests for disk limit functionality.""" + +import pytest + +from rock.actions import Command +from rock.sdk.sandbox.client import Sandbox +from rock.sdk.sandbox.config import SandboxConfig +from rock.utils.docker import DockerUtil 
+from tests.integration.conftest import SKIP_IF_LOG_PATH_NOT_XFS, SKIP_IF_NO_DOCKER, SKIP_IF_NO_STORAGE_OPT + + +@pytest.mark.need_admin +@SKIP_IF_NO_DOCKER +@SKIP_IF_NO_STORAGE_OPT +@pytest.mark.asyncio +async def test_disk_limit_enforcement(admin_remote_server): + """Test that the server-side rootfs disk limit is enforced when storage-opt is supported. + + This test is only run when storage-opt is supported (overlay2 + xfs + prjquota). + + Steps: + 1. Start a sandbox (server applies default limit_disk) + 2. Check sandbox status to verify limit_disk is reported + 3. Try to create a file larger than the limit (should fail) + 4. Create a small file (should succeed) + """ + config = SandboxConfig( + image="ubuntu:22.04", + memory="2g", + cpus=1.0, + base_url=f"{admin_remote_server.endpoint}:{admin_remote_server.port}", + startup_timeout=60, + ) + + sandbox = Sandbox(config) + await sandbox.start() + + try: + status = await sandbox.get_status() + print(f"Sandbox status: limit_disk={status.limit_disk_rootfs}") + + if status.limit_disk_rootfs is None: + pytest.skip("Server has no disk limit configured (sandbox_limit_disk_rootfs not set in rock-xxx.yml or nacos)") + print(f"✅ Disk limit is set to {status.limit_disk_rootfs}") + + # Parse limit to determine a file size that exceeds it + result = await sandbox.execute( + Command( + command=[ + "/bin/bash", + "-c", + f"fallocate -l {status.limit_disk_rootfs.replace('g', '')}G /tmp/large_file.bin 2>&1 || echo 'EXPECTED_ERROR'", + ] + ) + ) + + output = result.stdout + result.stderr + print(f"fallocate output: {output}") + + error_occurred = ( + result.exit_code != 0 + or "No space left on device" in output + or "fallocate failed" in output + or "EXPECTED_ERROR" in output + ) + + assert error_occurred, ( + f"Expected disk space error when filling disk, " + f"but got exit_code={result.exit_code}, output={output}" + ) + print("✅ Disk limit enforcement verified") + + small_file_result = await sandbox.execute( + 
Command(command=["/bin/bash", "-c", "fallocate -l 100M /tmp/small_file.bin && echo 'SUCCESS'"]) + ) + + small_output = small_file_result.stdout + small_file_result.stderr + assert small_file_result.exit_code == 0, ( + f"Expected small file (100MB) creation to succeed, " + f"but got exit_code={small_file_result.exit_code}" + ) + assert "SUCCESS" in small_output + print("✅ Small file (100MB) creation succeeded") + + finally: + await sandbox.stop() + + +@pytest.mark.need_admin +@SKIP_IF_NO_DOCKER +@pytest.mark.asyncio +async def test_disk_limit_default_value(admin_remote_server): + """Test that the server applies a default limit_disk visible in status.""" + config = SandboxConfig( + image="ubuntu:22.04", + memory="2g", + cpus=1.0, + base_url=f"{admin_remote_server.endpoint}:{admin_remote_server.port}", + startup_timeout=60, + ) + + sandbox = Sandbox(config) + await sandbox.start() + + try: + status = await sandbox.get_status() + print(f"Sandbox status: limit_disk={status.limit_disk_rootfs}") + + storage_opt_supported = DockerUtil.detect_storage_opt_support() + + if not storage_opt_supported: + assert status.limit_disk_rootfs is None, ( + f"Expected limit_disk=None when storage-opt not supported, got {status.limit_disk_rootfs}" + ) + print("✅ Storage-opt not supported: limit_disk is None") + else: + # When storage-opt is supported, limit_disk reflects server config (may be None if not configured) + print(f"✅ Server-reported limit_disk: {status.limit_disk_rootfs}") + + finally: + await sandbox.stop() + + +@pytest.mark.need_admin +@SKIP_IF_NO_DOCKER +@SKIP_IF_NO_STORAGE_OPT +@SKIP_IF_LOG_PATH_NOT_XFS +@pytest.mark.asyncio +async def test_logging_path_disk_limit_enforcement(admin_remote_server): + """Test that ROCK_LOGGING_PATH is also limited by disk quota. + + This test verifies that the log directory (ROCK_LOGGING_PATH) has a quota + enforced via XFS project quota, separate from the rootfs limit. + + Steps: + 1. 
Start a sandbox (server applies default log dir quota) + 2. Check that ROCK_LOGGING_PATH env var is set in container + 3. Try to create a file larger than the log quota (should fail) + 4. Create a 500MB file in ROCK_LOGGING_PATH (should succeed) + 5. Verify rootfs and log directory are independently limited + """ + config = SandboxConfig( + image="ubuntu:22.04", + memory="2g", + cpus=1.0, + base_url=f"{admin_remote_server.endpoint}:{admin_remote_server.port}", + startup_timeout=60, + ) + + sandbox = Sandbox(config) + await sandbox.start() + + try: + env_result = await sandbox.execute( + Command(command=["/bin/bash", "-c", "echo $ROCK_LOGGING_PATH"]) + ) + logging_path = env_result.stdout.strip() + print(f"ROCK_LOGGING_PATH in container: {logging_path}") + + assert logging_path, "ROCK_LOGGING_PATH should be set in container" + print(f"✅ ROCK_LOGGING_PATH is set to: {logging_path}") + + # Try to create a 1.5GB file in logging path (should fail due to log quota) + large_log_result = await sandbox.execute( + Command( + command=[ + "/bin/bash", + "-c", + f"fallocate -l 1500M {logging_path}/large_log.bin 2>&1 || echo 'EXPECTED_ERROR'", + ] + ) + ) + + large_output = large_log_result.stdout + large_log_result.stderr + print(f"Large log file creation output: {large_output}") + print(f"Large log file creation exit_code: {large_log_result.exit_code}") + + error_occurred = ( + large_log_result.exit_code != 0 + or "No space left on device" in large_output + or "Disk quota exceeded" in large_output + or "fallocate failed" in large_output + or "EXPECTED_ERROR" in large_output + ) + + assert error_occurred, ( + f"Expected disk quota error when creating 1.5GB file in log dir, " + f"but got exit_code={large_log_result.exit_code}, output={large_output}" + ) + print("✅ Log directory quota verified: 1.5GB file creation failed as expected") + + small_log_result = await sandbox.execute( + Command( + command=[ + "/bin/bash", + "-c", + f"fallocate -l 500M {logging_path}/small_log.bin && 
echo 'SUCCESS'", + ] + ) + ) + + small_output = small_log_result.stdout + small_log_result.stderr + print(f"Small log file creation output: {small_output}") + assert small_log_result.exit_code == 0, ( + f"Expected small log file (500MB) creation to succeed, " + f"but got exit_code={small_log_result.exit_code}, output={small_output}" + ) + assert "SUCCESS" in small_output + print("✅ Small log file (500MB) creation in ROCK_LOGGING_PATH succeeded") + + rootfs_result = await sandbox.execute( + Command( + command=[ + "/bin/bash", + "-c", + "fallocate -l 1G /tmp/rootfs_file.bin && echo 'ROOTFS_SUCCESS'", + ] + ) + ) + + rootfs_output = rootfs_result.stdout + rootfs_result.stderr + print(f"Rootfs file creation output: {rootfs_output}") + assert rootfs_result.exit_code == 0, ( + f"Expected 1GB file on rootfs to succeed, " + f"but got exit_code={rootfs_result.exit_code}, output={rootfs_output}" + ) + assert "ROOTFS_SUCCESS" in rootfs_output + print("✅ Rootfs and log directory are independently limited") + + finally: + await sandbox.stop() diff --git a/tests/unit/admin/proto/test_sandbox_response.py b/tests/unit/admin/proto/test_sandbox_response.py new file mode 100644 index 000000000..6e38865fc --- /dev/null +++ b/tests/unit/admin/proto/test_sandbox_response.py @@ -0,0 +1,143 @@ +""" +Unit tests for admin proto response models — limit_disk_rootfs and limit_disk_log fields. 
+ +Tests cover: +- SandboxStartResponse.limit_disk_rootfs / limit_disk_log fields +- SandboxStatusResponse.limit_disk_rootfs / limit_disk_log fields +- SandboxStatusResponse.from_sandbox_info() extraction of both fields +""" + +from rock.admin.proto.response import SandboxStartResponse, SandboxStatusResponse + + +# ---- SandboxStartResponse tests ---- + + +class TestSandboxStartResponseDiskLimit: + def test_limit_disk_rootfs_default_is_none(self): + response = SandboxStartResponse() + assert response.limit_disk_rootfs is None + + def test_limit_disk_log_default_is_none(self): + response = SandboxStartResponse() + assert response.limit_disk_log is None + + def test_limit_disk_rootfs_set_value(self): + response = SandboxStartResponse(limit_disk_rootfs="20g") + assert response.limit_disk_rootfs == "20g" + + def test_limit_disk_log_set_value(self): + response = SandboxStartResponse(limit_disk_log="5g") + assert response.limit_disk_log == "5g" + + def test_all_fields_with_both_limits(self): + response = SandboxStartResponse( + sandbox_id="test-sandbox", + host_ip="10.0.0.1", + cpus=4.0, + memory="16g", + limit_disk_rootfs="50g", + limit_disk_log="5g", + ) + assert response.sandbox_id == "test-sandbox" + assert response.limit_disk_rootfs == "50g" + assert response.limit_disk_log == "5g" + assert response.cpus == 4.0 + assert response.memory == "16g" + + +# ---- SandboxStatusResponse tests ---- + + +class TestSandboxStatusResponseDiskLimit: + def test_limit_disk_rootfs_default_is_none(self): + response = SandboxStatusResponse() + assert response.limit_disk_rootfs is None + + def test_limit_disk_log_default_is_none(self): + response = SandboxStatusResponse() + assert response.limit_disk_log is None + + def test_limit_disk_rootfs_set_value(self): + response = SandboxStatusResponse(limit_disk_rootfs="20g") + assert response.limit_disk_rootfs == "20g" + + def test_limit_disk_log_set_value(self): + response = SandboxStatusResponse(limit_disk_log="5g") + assert 
response.limit_disk_log == "5g" + + def test_from_sandbox_info_with_both_limits(self): + """from_sandbox_info() should extract both limit fields from SandboxInfo dict.""" + sandbox_info = { + "sandbox_id": "test-sandbox", + "phases": {}, + "port_mapping": {}, + "host_ip": "10.0.0.1", + "cpus": 2.0, + "memory": "8g", + "limit_disk_rootfs": "30g", + "limit_disk_log": "5g", + } + response = SandboxStatusResponse.from_sandbox_info(sandbox_info) + assert response.limit_disk_rootfs == "30g" + assert response.limit_disk_log == "5g" + assert response.cpus == 2.0 + assert response.memory == "8g" + + def test_from_sandbox_info_without_limits(self): + """from_sandbox_info() should yield None for both when absent.""" + sandbox_info = { + "sandbox_id": "test-sandbox", + "phases": {}, + "port_mapping": {}, + "cpus": 2.0, + "memory": "8g", + } + response = SandboxStatusResponse.from_sandbox_info(sandbox_info) + assert response.limit_disk_rootfs is None + assert response.limit_disk_log is None + + def test_from_sandbox_info_with_none_limits(self): + """from_sandbox_info() should surface None when fields are explicitly None.""" + sandbox_info = { + "sandbox_id": "test-sandbox", + "phases": {}, + "port_mapping": {}, + "limit_disk_rootfs": None, + "limit_disk_log": None, + } + response = SandboxStatusResponse.from_sandbox_info(sandbox_info) + assert response.limit_disk_rootfs is None + assert response.limit_disk_log is None + + def test_from_sandbox_info_partial_limits(self): + """from_sandbox_info() handles one field set, one absent.""" + sandbox_info = { + "sandbox_id": "test-sandbox", + "phases": {}, + "port_mapping": {}, + "limit_disk_rootfs": "50g", + } + response = SandboxStatusResponse.from_sandbox_info(sandbox_info) + assert response.limit_disk_rootfs == "50g" + assert response.limit_disk_log is None + + +# ---- actions/sandbox/response.SandboxStatusResponse tests ---- + + +class TestActionsSandboxStatusResponseDiskLimit: + def test_actions_status_response_both_limits(self): 
+ """rock.actions.sandbox.response.SandboxStatusResponse should have both limit fields.""" + from rock.actions.sandbox.response import SandboxStatusResponse as ActionStatusResponse + + response = ActionStatusResponse(limit_disk_rootfs="20g", limit_disk_log="5g") + assert response.limit_disk_rootfs == "20g" + assert response.limit_disk_log == "5g" + + def test_actions_status_response_defaults_none(self): + from rock.actions.sandbox.response import SandboxStatusResponse as ActionStatusResponse + + response = ActionStatusResponse() + assert response.limit_disk_rootfs is None + assert response.limit_disk_log is None diff --git a/tests/unit/deployments/test_docker_deployment_disk_limit.py b/tests/unit/deployments/test_docker_deployment_disk_limit.py new file mode 100644 index 000000000..4d85669a9 --- /dev/null +++ b/tests/unit/deployments/test_docker_deployment_disk_limit.py @@ -0,0 +1,246 @@ +""" +Unit tests for disk_limit support in DockerDeployment and DockerDeploymentConfig. + +Tests cover: +- DockerDeploymentConfig default and custom limit_disk_rootfs / limit_disk_log values +- DockerDeployment._storage_opts() argument generation +- DockerDeployment.start() graceful degradation when storage-opt is unsupported +""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from rock.deployments.config import DockerDeploymentConfig +from rock.deployments.docker import DockerDeployment + + +# ---- DockerDeploymentConfig tests ---- + + +class TestDockerDeploymentConfigDiskLimit: + def test_default_limit_disk_rootfs_is_none(self): + config = DockerDeploymentConfig() + assert config.limit_disk_rootfs is None + + def test_default_limit_disk_log_is_none(self): + config = DockerDeploymentConfig() + assert config.limit_disk_log is None + + def test_custom_limit_disk_rootfs(self): + config = DockerDeploymentConfig(limit_disk_rootfs="50g") + assert config.limit_disk_rootfs == "50g" + + def test_custom_limit_disk_log(self): + config = 
DockerDeploymentConfig(limit_disk_log="5g") + assert config.limit_disk_log == "5g" + + def test_limit_disk_rootfs_none(self): + config = DockerDeploymentConfig(limit_disk_rootfs=None) + assert config.limit_disk_rootfs is None + + def test_limit_disk_log_none(self): + config = DockerDeploymentConfig(limit_disk_log=None) + assert config.limit_disk_log is None + + def test_limit_disk_rootfs_preserved_in_model_dump(self): + config = DockerDeploymentConfig(limit_disk_rootfs="50g") + dump = config.model_dump() + assert dump["limit_disk_rootfs"] == "50g" + + def test_limit_disk_log_preserved_in_model_dump(self): + config = DockerDeploymentConfig(limit_disk_log="5g") + dump = config.model_dump() + assert dump["limit_disk_log"] == "5g" + + def test_limit_disk_rootfs_none_preserved_in_model_dump(self): + config = DockerDeploymentConfig(limit_disk_rootfs=None) + dump = config.model_dump() + assert dump["limit_disk_rootfs"] is None + + +# ---- DockerDeployment._storage_opts() tests ---- + + +class TestStorageOpts: + """Tests for DockerDeployment._storage_opts() method.""" + + @patch("rock.deployments.docker.DockerSandboxValidator") + def test_storage_opts_with_limit_disk_rootfs(self, _mock_validator): + deployment = DockerDeployment.from_config(DockerDeploymentConfig(limit_disk_rootfs="30g")) + result = deployment._storage_opts() + assert result == ["--storage-opt", "size=30g"] + + @patch("rock.deployments.docker.DockerSandboxValidator") + def test_storage_opts_with_none(self, _mock_validator): + deployment = DockerDeployment.from_config(DockerDeploymentConfig(limit_disk_rootfs=None)) + result = deployment._storage_opts() + assert result == [] + + @patch("rock.deployments.docker.DockerSandboxValidator") + def test_storage_opts_default_value(self, _mock_validator): + deployment = DockerDeployment.from_config(DockerDeploymentConfig()) + result = deployment._storage_opts() + assert result == [] + + @patch("rock.deployments.docker.DockerSandboxValidator") + def 
test_storage_opts_various_sizes(self, _mock_validator): + for size in ("1g", "512m", "50g", "1t"): + deployment = DockerDeployment.from_config(DockerDeploymentConfig(limit_disk_rootfs=size)) + result = deployment._storage_opts() + assert result == ["--storage-opt", f"size={size}"] + + +# ---- DockerDeployment.start() storage-opt degradation tests ---- + + +def _make_start_mocks(deployment): + deployment.sandbox_validator = MagicMock() + deployment.sandbox_validator.check_availability.return_value = True + deployment.sandbox_validator.check_resource.return_value = True + deployment._pull_image = MagicMock() + deployment.do_port_mapping = AsyncMock() + deployment._prepare_volume_mounts = MagicMock(return_value=[]) + deployment._start_container = AsyncMock() + deployment._wait_until_alive = AsyncMock() + deployment._service_status = MagicMock() + deployment._service_status.get_mapped_port = MagicMock(return_value=8080) + deployment._service_status.phases = {} + + +async def _run_start(deployment): + with ( + patch("rock.deployments.docker.get_executor"), + patch("rock.deployments.docker.asyncio.get_running_loop") as mock_loop, + patch("rock.deployments.docker.wait_until_alive", new_callable=AsyncMock), + patch("rock.deployments.docker.env_vars") as mock_env, + patch("rock.deployments.docker.subprocess"), + ): + mock_env.ROCK_LOGGING_PATH = "" + mock_env.ROCK_TIME_ZONE = "UTC" + mock_loop.return_value.run_in_executor = AsyncMock() + try: + await deployment.start() + except Exception: + pass + + +class TestDockerDeploymentStartDiskLimit: + """Tests that start() applies correct effective values for rootfs and log quotas.""" + + @pytest.mark.asyncio + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.detect_storage_opt_support", return_value=False) + async def test_rootfs_downgraded_when_storage_opt_unsupported(self, _mock_detect, _mock_validator): + """When storage-opt NOT supported: effective_limit_disk_rootfs=None; 
config unchanged.""" + config = DockerDeploymentConfig(limit_disk_rootfs="50g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + _make_start_mocks(deployment) + await _run_start(deployment) + + assert deployment.config.limit_disk_rootfs == "50g" + assert deployment.effective_limit_disk_rootfs is None + + @pytest.mark.asyncio + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.detect_storage_opt_support", return_value=True) + async def test_rootfs_preserved_when_storage_opt_supported(self, _mock_detect, _mock_validator): + """When storage-opt IS supported: effective_limit_disk_rootfs matches config.""" + config = DockerDeploymentConfig(limit_disk_rootfs="50g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + _make_start_mocks(deployment) + await _run_start(deployment) + + assert deployment.config.limit_disk_rootfs == "50g" + assert deployment.effective_limit_disk_rootfs == "50g" + + @pytest.mark.asyncio + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.detect_storage_opt_support", return_value=False) + async def test_no_error_when_rootfs_already_none(self, _mock_detect, _mock_validator): + """When limit_disk_rootfs is None: start() should not error.""" + config = DockerDeploymentConfig(limit_disk_rootfs=None, image="python:3.11") + deployment = DockerDeployment.from_config(config) + _make_start_mocks(deployment) + await _run_start(deployment) + + assert deployment.config.limit_disk_rootfs is None + assert deployment.effective_limit_disk_rootfs is None + + @pytest.mark.asyncio + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.detect_storage_opt_support", return_value=True) + @patch("rock.deployments.docker.DockerUtil.is_xfs_prjquota_path", return_value=False) + async def test_log_downgraded_when_not_xfs_prjquota(self, _mock_prjquota, _mock_detect, 
_mock_validator): + """When log path is not XFS+prjquota: effective_limit_disk_log=None; config unchanged. + + Note: log quota has NO dependency on docker being overlay2 — + is_xfs_prjquota_path() is the only gate. + """ + config = DockerDeploymentConfig(limit_disk_log="5g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + _make_start_mocks(deployment) + + with ( + patch("rock.deployments.docker.get_executor"), + patch("rock.deployments.docker.asyncio.get_running_loop") as mock_loop, + patch("rock.deployments.docker.wait_until_alive", new_callable=AsyncMock), + patch("rock.deployments.docker.env_vars") as mock_env, + patch("rock.deployments.docker.subprocess"), + ): + mock_env.ROCK_LOGGING_PATH = "/var/log/rock" + mock_env.ROCK_TIME_ZONE = "UTC" + mock_loop.return_value.run_in_executor = AsyncMock() + try: + await deployment.start() + except Exception: + pass + + assert deployment.config.limit_disk_log == "5g" + assert deployment.effective_limit_disk_log is None + + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.is_xfs_prjquota_path", return_value=False) + def test_log_not_downgraded_when_no_log_path(self, _mock_prjquota, _mock_validator): + """When ROCK_LOGGING_PATH is empty, _try_set_log_dir_quota is never called, + so effective_limit_disk_log remains equal to config.limit_disk_log.""" + config = DockerDeploymentConfig(limit_disk_log="5g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + # effective starts equal to config before start() is called + assert deployment.effective_limit_disk_log == "5g" + + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.is_xfs_prjquota_path", return_value=False) + def test_try_set_log_dir_quota_downgrades_when_not_xfs_prjquota(self, _mock_prjquota, _mock_validator): + """_try_set_log_dir_quota: is_xfs_prjquota_path=False → effective_limit_disk_log=None.""" + config = 
DockerDeploymentConfig(limit_disk_log="5g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + deployment._effective_limit_disk_log = "5g" + deployment._container_name = "test-container" + + deployment._try_set_log_dir_quota("/var/log/rock/test-container") + + assert deployment.effective_limit_disk_log is None + + @patch("rock.deployments.docker.DockerSandboxValidator") + @patch("rock.deployments.docker.DockerUtil.is_xfs_prjquota_path", return_value=True) + def test_try_set_log_dir_quota_independent_of_docker_driver(self, _mock_prjquota, _mock_validator): + """_try_set_log_dir_quota passes the XFS gate regardless of Docker storage driver. + + Log quota only requires is_xfs_prjquota_path(); overlay2 is irrelevant. + The subprocess calls inside (findmnt, xfs_quota) are mocked to succeed. + """ + config = DockerDeploymentConfig(limit_disk_log="5g", image="python:3.11") + deployment = DockerDeployment.from_config(config) + deployment._effective_limit_disk_log = "5g" + deployment._container_name = "test-container" + + with patch("rock.deployments.docker.subprocess") as mock_sub: + ok = MagicMock() + ok.returncode = 0 + ok.stdout = "/var/log/rock" + mock_sub.run.return_value = ok + deployment._try_set_log_dir_quota("/var/log/rock/test-container") + + # xfs_quota succeeded → effective value preserved + assert deployment.effective_limit_disk_log == "5g" diff --git a/tests/unit/sandbox/job/__init__.py b/tests/unit/sandbox/job/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/sandbox/test_sandbox_manager_disk_limit.py b/tests/unit/sandbox/test_sandbox_manager_disk_limit.py new file mode 100644 index 000000000..7e037cd01 --- /dev/null +++ b/tests/unit/sandbox/test_sandbox_manager_disk_limit.py @@ -0,0 +1,70 @@ +""" +Unit tests for SandboxManager.validate_sandbox_spec() — limit_disk_rootfs validation. + +These tests do NOT require Ray or Docker; they only test the synchronous +validation logic. 
+""" + +import pytest + +from rock.config import RuntimeConfig, StandardSpec +from rock.deployments.config import DockerDeploymentConfig +from rock.sdk.common.exceptions import BadRequestRockError +from rock.sandbox.sandbox_manager import SandboxManager + + +@pytest.fixture +def runtime_config(): + return RuntimeConfig( + max_allowed_spec=StandardSpec(cpus=16, memory="64g"), + ) + + +class TestValidateSandboxSpecDiskLimit: + """Tests for limit_disk_rootfs validation in SandboxManager.validate_sandbox_spec().""" + + def test_valid_limit_disk_rootfs_20g(self, runtime_config): + config = DockerDeploymentConfig(limit_disk_rootfs="20g") + # Should not raise + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_valid_limit_disk_rootfs_various_formats(self, runtime_config): + for size in ("1g", "512m", "100gb", "2t", "1024mb", "1024k"): + config = DockerDeploymentConfig(limit_disk_rootfs=size) + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_valid_limit_disk_rootfs_none(self, runtime_config): + """None limit_disk_rootfs should skip validation (no error).""" + config = DockerDeploymentConfig(limit_disk_rootfs=None) + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_invalid_limit_disk_rootfs_raises_bad_request(self, runtime_config): + config = DockerDeploymentConfig(limit_disk_rootfs="not-a-size") + with pytest.raises(BadRequestRockError, match="Invalid limit_disk_rootfs size"): + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_invalid_limit_disk_rootfs_empty_string(self, runtime_config): + config = DockerDeploymentConfig(limit_disk_rootfs="") + with pytest.raises(BadRequestRockError, match="Invalid limit_disk_rootfs size"): + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_invalid_limit_disk_rootfs_negative(self, runtime_config): + config = DockerDeploymentConfig(limit_disk_rootfs="-10g") + with 
pytest.raises(BadRequestRockError, match="Invalid limit_disk_rootfs size"): + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_valid_limit_disk_rootfs_no_unit(self, runtime_config): + """A bare number without unit should still be parsed (as bytes).""" + config = DockerDeploymentConfig(limit_disk_rootfs="1024") + # Bare number is treated as bytes by parse_size_to_bytes, so it should pass + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_invalid_limit_disk_rootfs_only_unit(self, runtime_config): + config = DockerDeploymentConfig(limit_disk_rootfs="gb") + with pytest.raises(BadRequestRockError, match="Invalid limit_disk_rootfs size"): + SandboxManager.validate_sandbox_spec(None, runtime_config, config) + + def test_limit_disk_rootfs_validation_independent_of_cpu_memory(self, runtime_config): + """limit_disk_rootfs validation should not interfere with cpu/memory checks.""" + config = DockerDeploymentConfig(cpus=2, memory="8g", limit_disk_rootfs="50g") + SandboxManager.validate_sandbox_spec(None, runtime_config, config) diff --git a/tests/unit/utils/test_docker_util.py b/tests/unit/utils/test_docker_util.py new file mode 100644 index 000000000..e6745deb7 --- /dev/null +++ b/tests/unit/utils/test_docker_util.py @@ -0,0 +1,181 @@ +""" +Unit tests for DockerUtil.detect_storage_opt_support(). + +All subprocess calls are mocked so no Docker daemon is required. 
+""" + +import json +import subprocess +from unittest.mock import MagicMock, patch + +import pytest + +from rock.utils.docker import DockerUtil + + +# ---- helpers ---- + + +def _make_run_result(returncode=0, stdout="", stderr=""): + r = MagicMock(spec=subprocess.CompletedProcess) + r.returncode = returncode + r.stdout = stdout + r.stderr = stderr + return r + + +def _docker_info_json(driver="overlay2", backing_fs="xfs", docker_root="/var/lib/docker"): + """Return a JSON string mimicking `docker info --format '{{json .}}'`.""" + info = { + "Driver": driver, + "DriverStatus": [ + ["Backing Filesystem", backing_fs], + ["Supports d_type", "true"], + ], + "DockerRootDir": docker_root, + } + return json.dumps(info) + + +# ---- detect_storage_opt_support tests ---- + + +class TestDetectStorageOptSupport: + """Tests for DockerUtil.detect_storage_opt_support().""" + + # findmnt now returns "FSTYPE OPTIONS" (delegated to is_xfs_prjquota_path) + @patch("rock.utils.docker.subprocess.run") + def test_all_requirements_met_prjquota(self, mock_run): + """Should return True when overlay2 + xfs + prjquota are all present.""" + mock_run.side_effect = [ + # docker info + _make_run_result(stdout=_docker_info_json()), + # findmnt for DockerRootDir (FSTYPE OPTIONS) + _make_run_result(stdout="xfs rw,relatime,attr2,inode64,prjquota"), + ] + assert DockerUtil.detect_storage_opt_support() is True + + @patch("rock.utils.docker.subprocess.run") + def test_all_requirements_met_pquota(self, mock_run): + """Should return True when pquota (synonym for prjquota) is present.""" + mock_run.side_effect = [ + _make_run_result(stdout=_docker_info_json()), + _make_run_result(stdout="xfs rw,relatime,attr2,inode64,pquota"), + ] + assert DockerUtil.detect_storage_opt_support() is True + + @patch("rock.utils.docker.subprocess.run") + def test_non_overlay2_driver(self, mock_run): + """Should return False if the storage driver is not overlay2.""" + mock_run.return_value = 
_make_run_result(stdout=_docker_info_json(driver="aufs")) + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_non_xfs_docker_root(self, mock_run): + """Should return False if DockerRootDir is not on XFS.""" + mock_run.side_effect = [ + _make_run_result(stdout=_docker_info_json()), + _make_run_result(stdout="ext4 rw,relatime"), + ] + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_missing_prjquota(self, mock_run): + """Should return False when mount options lack prjquota/pquota.""" + mock_run.side_effect = [ + _make_run_result(stdout=_docker_info_json()), + _make_run_result(stdout="xfs rw,relatime,attr2,inode64,noquota"), + ] + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_docker_info_fails(self, mock_run): + """Should return False if docker info returns non-zero exit code.""" + mock_run.return_value = _make_run_result(returncode=1, stderr="Cannot connect to Docker daemon") + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_docker_info_raises_exception(self, mock_run): + """Should return False if docker info subprocess raises.""" + mock_run.side_effect = FileNotFoundError("docker not found") + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_findmnt_fails(self, mock_run): + """Should return False if findmnt returns non-zero exit code.""" + mock_run.side_effect = [ + _make_run_result(stdout=_docker_info_json()), + _make_run_result(returncode=1, stderr="findmnt: failed"), + ] + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_findmnt_raises_exception(self, mock_run): + """Should return False if findmnt subprocess raises.""" + mock_run.side_effect = [ + 
_make_run_result(stdout=_docker_info_json()), + subprocess.TimeoutExpired(cmd="findmnt", timeout=5), + ] + assert DockerUtil.detect_storage_opt_support() is False + + @patch("rock.utils.docker.subprocess.run") + def test_missing_docker_root_dir(self, mock_run): + """Should return False if DockerRootDir is missing from docker info.""" + info = {"Driver": "overlay2", "DriverStatus": [["Backing Filesystem", "xfs"]]} + mock_run.return_value = _make_run_result(stdout=json.dumps(info)) + assert DockerUtil.detect_storage_opt_support() is False + + +# ---- is_xfs_prjquota_path tests ---- + + +class TestIsXfsPrjquotaPath: + """Tests for DockerUtil.is_xfs_prjquota_path(). + + Unlike detect_storage_opt_support(), this check is path-local and has + no dependency on Docker's storage driver. + """ + + @patch("rock.utils.docker.subprocess.run") + def test_xfs_with_prjquota(self, mock_run): + mock_run.return_value = _make_run_result(stdout="xfs rw,relatime,attr2,inode64,prjquota") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is True + + @patch("rock.utils.docker.subprocess.run") + def test_xfs_with_pquota_synonym(self, mock_run): + """pquota is a synonym for prjquota and must also be accepted.""" + mock_run.return_value = _make_run_result(stdout="xfs rw,relatime,attr2,inode64,pquota") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is True + + @patch("rock.utils.docker.subprocess.run") + def test_xfs_without_prjquota(self, mock_run): + """XFS mount without prjquota/pquota should return False.""" + mock_run.return_value = _make_run_result(stdout="xfs rw,relatime,attr2,inode64,noquota") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False + + @patch("rock.utils.docker.subprocess.run") + def test_non_xfs_with_prjquota(self, mock_run): + """ext4 with prjquota-like options should return False (not XFS).""" + mock_run.return_value = _make_run_result(stdout="ext4 rw,relatime,prjquota") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False + + 
@patch("rock.utils.docker.subprocess.run") + def test_findmnt_failure(self, mock_run): + mock_run.return_value = _make_run_result(returncode=1, stderr="findmnt: failed") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False + + @patch("rock.utils.docker.subprocess.run") + def test_findmnt_exception(self, mock_run): + mock_run.side_effect = subprocess.TimeoutExpired(cmd="findmnt", timeout=5) + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False + + @patch("rock.utils.docker.subprocess.run") + def test_empty_output(self, mock_run): + mock_run.return_value = _make_run_result(stdout="") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False + + @patch("rock.utils.docker.subprocess.run") + def test_only_fstype_no_options(self, mock_run): + """If findmnt returns only FSTYPE with no OPTIONS column, return False.""" + mock_run.return_value = _make_run_result(stdout="xfs") + assert DockerUtil.is_xfs_prjquota_path("/data/logs") is False