Force codex stream to use chat completions
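Summary: `LLMEndpointSpec.from_dict` now parses the optional `stream` flag once into `stream_value` and reuses it; for the `codex` provider, a truthy stream overrides the resolved `wire_api` (e.g. `"responses"` from `~/.codex/config.toml`) with `"chat.completions"`.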
@@ -415,6 +415,7 @@ class LLMEndpointSpec:
         base_url = str(data.get("base_url") or "").strip()
         wire_api = str(data.get("wire_api") or "").strip()
         stream = data.get("stream")
+        stream_value = _require_bool(stream, context="llm.endpoint.stream") if stream is not None else False
         reasoning_effort = str(data.get("reasoning_effort") or "").strip()
         api_key_env = str(data.get("api_key_env") or "").strip()
         if provider == "codex":
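For context, `_require_bool` is referenced but not shown in this diff. A plausible shape for the helper (an assumption, not the repository's actual code) would be:

# Hypothetical sketch of the validator assumed by the hunk above; the real
# _require_bool in this repository may differ. It accepts only genuine bools
# and raises with the given context string otherwise.
def _require_bool(value: object, *, context: str) -> bool:
    if not isinstance(value, bool):
        raise ValueError(f"{context} must be a boolean, got {value!r}")
    return value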
@@ -428,6 +429,8 @@ class LLMEndpointSpec:
                 wire_api = resolved_wire_api
             if not reasoning_effort and resolved_reasoning_effort:
                 reasoning_effort = resolved_reasoning_effort
+            if stream_value:
+                wire_api = "chat.completions"
             if not api_key_env:
                 api_key_env = "OPENAI_API_KEY"
         elif provider == "bailian":
@@ -451,7 +454,7 @@ class LLMEndpointSpec:
             model=_require_str(data.get("model"), context="llm.endpoint.model"),
             provider=provider,
             wire_api=_require_str(wire_api, context="llm.endpoint.wire_api"),
-            stream=(_require_bool(stream, context="llm.endpoint.stream") if stream is not None else False),
+            stream=stream_value,
             reasoning_effort=reasoning_effort or None,
             api_key_env=_require_str(api_key_env, context="llm.endpoint.api_key_env"),
             timeout_s=_require_float(
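Taken together, the codex branch now applies a simple precedence: the resolved Codex config supplies the default wire API, and streaming overrides it. The following standalone sketch (hypothetical function name, not the repository's API) mirrors that logic:

def resolve_wire_api(resolved_wire_api: str, stream_value: bool) -> str:
    # Start from whatever the Codex config resolved (e.g. wire_api = "responses").
    wire_api = resolved_wire_api
    # Streaming forces the chat-completions wire API, overriding the config.
    if stream_value:
        wire_api = "chat.completions"
    return wire_api

assert resolve_wire_api("responses", stream_value=True) == "chat.completions"
assert resolve_wire_api("responses", stream_value=False) == "responses"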
@@ -288,6 +288,30 @@ class CoreFlowTests(unittest.TestCase):
         self.assertEqual(endpoint.reasoning_effort, "high")
         self.assertEqual(endpoint.api_key_env, "OPENAI_API_KEY")
 
+    def test_codex_stream_forces_chat_completions_wire_api(self) -> None:
+        with tempfile.TemporaryDirectory() as tmp:
+            tmp_path = Path(tmp)
+            codex_dir = tmp_path / ".codex"
+            codex_dir.mkdir(parents=True)
+            (codex_dir / "config.toml").write_text(
+                '\n'.join(
+                    [
+                        'model_provider = "ipads"',
+                        "",
+                        "[model_providers.ipads]",
+                        'base_url = "http://codex.example/v1"',
+                        'wire_api = "responses"',
+                    ]
+                ),
+                encoding="utf-8",
+            )
+            with mock.patch.dict(os.environ, {"HOME": str(tmp_path)}, clear=True):
+                endpoint = LLMEndpointSpec.from_dict(
+                    {"provider": "codex", "model": "gpt-5.4", "stream": True}
+                )
+            self.assertTrue(endpoint.stream)
+            self.assertEqual(endpoint.wire_api, "chat.completions")
+
     def test_endpoint_stream_flag(self) -> None:
         endpoint = LLMEndpointSpec.from_dict(
             {
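The new test stubs `HOME` so Codex config resolution reads the temporary `~/.codex/config.toml`, then verifies that `stream: True` wins over the file's `wire_api = "responses"`.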