From 67c0411d67ef6011e59c59066dd76b86e62831fa Mon Sep 17 00:00:00 2001
From: changtao
Date: Wed, 8 Oct 2025 23:25:37 +0800
Subject: [PATCH] fix CVE-2025-6242

---
 backport-CVE-2025-6242.patch | 169 +++++++++++++++++++++++++++++++++++
 vllm.spec                    |  11 ++-
 2 files changed, 179 insertions(+), 1 deletion(-)
 create mode 100644 backport-CVE-2025-6242.patch

diff --git a/backport-CVE-2025-6242.patch b/backport-CVE-2025-6242.patch
new file mode 100644
index 0000000..6d1c27c
--- /dev/null
+++ b/backport-CVE-2025-6242.patch
@@ -0,0 +1,169 @@
+From 9d9a2b77f19f68262d5e469c4e82c0f6365ad72d Mon Sep 17 00:00:00 2001
+From: Chenheli Hua
+Date: Thu, 2 Oct 2025 10:27:10 -0700
+Subject: [PATCH] [Small] Prevent bypassing media domain restriction via HTTP
+ redirects (#26035)
+
+Signed-off-by: Chenheli Hua
+Signed-off-by: simon-mo
+---
+ docs/features/multimodal_inputs.md |  7 +++++++
+ docs/usage/security.md             |  3 +++
+ vllm/connections.py                | 24 ++++++++++++++++++------
+ vllm/envs.py                       |  6 ++++++
+ vllm/multimodal/utils.py           | 12 ++++++++++--
+ 5 files changed, 44 insertions(+), 8 deletions(-)
+
+diff --git a/docs/features/multimodal_inputs.md b/docs/features/multimodal_inputs.md
+index 19b6681..a30053a 100644
+--- a/docs/features/multimodal_inputs.md
++++ b/docs/features/multimodal_inputs.md
+@@ -5,6 +5,13 @@ title: Multimodal Inputs
+ 
+ This page teaches you how to pass multi-modal inputs to [multi-modal models][supported-mm-models] in vLLM.
+ 
++!!! tip
++    When serving multi-modal models, consider setting `--allowed-media-domains` to restrict the domains that vLLM can access, so that it cannot reach arbitrary endpoints that could be exploited for Server-Side Request Forgery (SSRF) attacks. You can provide a list of domains for this argument. For example: `--allowed-media-domains upload.wikimedia.org github.com www.bogotobogo.com`
++
++    Also, consider setting `VLLM_MEDIA_URL_ALLOW_REDIRECTS=0` to prevent HTTP redirects from being followed to bypass domain restrictions.
++
++    This restriction is especially important if you run vLLM in a containerized environment where the vLLM pods may have unrestricted access to internal networks.
++
+ !!! note
+     We are actively iterating on multi-modal support. See [this RFC](gh-issue:4194) for upcoming changes,
+     and [open an issue on GitHub](https://github.com/vllm-project/vllm/issues/new/choose) if you have any feedback or feature requests.
+diff --git a/docs/usage/security.md b/docs/usage/security.md
+index 7614043..6089246 100644
+--- a/docs/usage/security.md
++++ b/docs/usage/security.md
+@@ -54,6 +54,9 @@ Key points from the PyTorch security guide:
+ - Implement proper authentication and authorization for management interfaces
+ - Follow the principle of least privilege for all system components
+ 
++Also, consider setting `VLLM_MEDIA_URL_ALLOW_REDIRECTS=0` to prevent HTTP
++redirects from being followed to bypass domain restrictions.
++
+ ## Security and Firewalls: Protecting Exposed vLLM Systems
+ 
+ While vLLM is designed to allow unsafe network services to be isolated to
+diff --git a/vllm/connections.py b/vllm/connections.py
+index 103505e..1f34171 100644
+--- a/vllm/connections.py
++++ b/vllm/connections.py
+@@ -54,6 +54,7 @@ class HTTPConnection:
+         stream: bool = False,
+         timeout: Optional[float] = None,
+         extra_headers: Optional[Mapping[str, str]] = None,
++        allow_redirects: bool = True,
+     ):
+         self._validate_http_url(url)
+ 
+@@ -63,7 +64,8 @@
+         return client.get(url,
+                           headers=self._headers(**extra_headers),
+                           stream=stream,
+-                          timeout=timeout)
++                          timeout=timeout,
++                          allow_redirects=allow_redirects)
+ 
+     async def get_async_response(
+         self,
+@@ -71,6 +73,7 @@
+         url: str,
+         *,
+         timeout: Optional[float] = None,
+         extra_headers: Optional[Mapping[str, str]] = None,
++        allow_redirects: bool = True,
+     ):
+         self._validate_http_url(url)
+ 
+@@ -79,10 +82,17 @@
+ 
+         return client.get(url,
+                           headers=self._headers(**extra_headers),
+-                          timeout=timeout)
+-
+-    def get_bytes(self, url: str, *, timeout: Optional[float] = None) -> bytes:
+-        with self.get_response(url, timeout=timeout) as r:
++                          timeout=timeout,
++                          allow_redirects=allow_redirects)
++
++    def get_bytes(self,
++                  url: str,
++                  *,
++                  timeout: Optional[float] = None,
++                  allow_redirects: bool = True) -> bytes:
++        with self.get_response(url,
++                               timeout=timeout,
++                               allow_redirects=allow_redirects) as r:
+             r.raise_for_status()
+ 
+             return r.content
+@@ -92,8 +102,10 @@
+         url: str,
+         *,
+         timeout: Optional[float] = None,
++        allow_redirects: bool = True,
+     ) -> bytes:
+-        async with await self.get_async_response(url, timeout=timeout) as r:
++        async with await self.get_async_response(
++            url, timeout=timeout, allow_redirects=allow_redirects) as r:
+             r.raise_for_status()
+ 
+             return await r.read()
+diff --git a/vllm/envs.py b/vllm/envs.py
+index 80c5f28..7644a6f 100644
+--- a/vllm/envs.py
++++ b/vllm/envs.py
+@@ -59,6 +59,7 @@ if TYPE_CHECKING:
+     VLLM_IMAGE_FETCH_TIMEOUT: int = 5
+     VLLM_VIDEO_FETCH_TIMEOUT: int = 30
+     VLLM_AUDIO_FETCH_TIMEOUT: int = 10
++    VLLM_MEDIA_URL_ALLOW_REDIRECTS: bool = True
+     VLLM_VIDEO_LOADER_BACKEND: str = "opencv"
+     VLLM_MM_INPUT_CACHE_GIB: int = 8
+     VLLM_TARGET_DEVICE: str = "cuda"
+@@ -498,6 +499,11 @@ environment_variables: dict[str, Callable[[], Any]] = {
+     "VLLM_AUDIO_FETCH_TIMEOUT":
+     lambda: int(os.getenv("VLLM_AUDIO_FETCH_TIMEOUT", "10")),
+ 
++    # Whether to allow HTTP redirects when fetching from media URLs.
++    # Defaults to True.
++    "VLLM_MEDIA_URL_ALLOW_REDIRECTS":
++    lambda: bool(int(os.getenv("VLLM_MEDIA_URL_ALLOW_REDIRECTS", "1"))),
++
+     # Backend for Video IO
+     # - "opencv": Default backend that uses OpenCV stream buffered backend.
+     #
+diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py
+index 11a25f8..c09a70d 100644
+--- a/vllm/multimodal/utils.py
++++ b/vllm/multimodal/utils.py
+@@ -105,7 +105,11 @@ class MediaConnector:
+ 
+         if url_spec.scheme.startswith("http"):
+             connection = self.connection
+-            data = connection.get_bytes(url, timeout=fetch_timeout)
++            data = connection.get_bytes(
++                url,
++                timeout=fetch_timeout,
++                allow_redirects=envs.VLLM_MEDIA_URL_ALLOW_REDIRECTS,
++            )
+ 
+             return media_io.load_bytes(data)
+ 
+@@ -129,7 +133,11 @@ class MediaConnector:
+ 
+         if url_spec.scheme.startswith("http"):
+             connection = self.connection
+-            data = await connection.async_get_bytes(url, timeout=fetch_timeout)
++            data = await connection.async_get_bytes(
++                url,
++                timeout=fetch_timeout,
++                allow_redirects=envs.VLLM_MEDIA_URL_ALLOW_REDIRECTS,
++            )
+ 
+             return media_io.load_bytes(data)
+ 
+-- 
+2.46.0
+
diff --git a/vllm.spec b/vllm.spec
index 3ebe760..b91f107 100644
--- a/vllm.spec
+++ b/vllm.spec
@@ -3,12 +3,14 @@
 
 Name:           vllm
 Version:        0.9.1
-Release:        1
+Release:        2
 Summary:        Powerful engine for LLMs
 License:        (Apache-2.0 AND BSD-3-Clause) OR BSD-3-CLause
 URL:            https://github.com/vllm-project/vllm
 Source0:        https://gitee.com/src-openeuler/vllm/raw/master/vllm-%{version}.tar.gz
+Patch0001:      backport-CVE-2025-6242.patch
+
 
 BuildArch:      noarch
 
 %description
@@ -28,6 +30,7 @@ Buildrequires:  python3-pytorch
 
 %prep
 %autosetup -n %{name}-%{version} -N
+%autopatch -p1
 
 %build
 export SETUPTOOLS_SCM_PRETEND_VERSION=%{version}
@@ -69,6 +72,12 @@ mv %{buildroot}/filelist.lst .
 
 %files -n python3-%{_name} -f filelist.lst
 
 %changelog
+* Thu Oct 9 2025 changtao - 0.9.1-2
+- Type:CVE
+- CVE:CVE-2025-6242
+- SUG:NA
+- DESC:fix CVE-2025-6242
+
 * Fri Jul 4 2025 gongzequn - 0.9.1-1
 - Change the baseline version to 0.9.1
-- 
Gitee
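
Reviewer note: the standalone sketch below is not part of the patch; it only
illustrates the mechanism the backport relies on. `VLLM_MEDIA_URL_ALLOW_REDIRECTS`
is parsed with `bool(int(...))`, so "0" disables redirect following, and the
resulting flag is forwarded to the `allow_redirects` keyword of the underlying
HTTP client (requests on the synchronous path). The URL is a placeholder, not
one used by vLLM.

    # Standalone sketch of the redirect gate introduced by this patch.
    import os

    import requests

    # Mirrors the lambda in vllm/envs.py: "0" -> False, default "1" -> True.
    allow_redirects = bool(
        int(os.getenv("VLLM_MEDIA_URL_ALLOW_REDIRECTS", "1")))

    resp = requests.get(
        "https://example.com/image.png",  # placeholder media URL
        timeout=5,
        allow_redirects=allow_redirects,
    )
    # With allow_redirects=False, a redirecting server returns its 3xx
    # response as-is instead of the off-domain target, so the fetched bytes
    # can never come from a host outside --allowed-media-domains.
    resp.raise_for_status()  # raises for 4xx/5xx only; a 3xx passes through
    print(resp.status_code, len(resp.content))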