Add streaming LLM proposal support
@@ -253,9 +253,23 @@ class CoreFlowTests(unittest.TestCase):
         self.assertEqual(endpoint.provider, "codex")
         self.assertEqual(endpoint.base_url, "http://codex.example/v1")
         self.assertEqual(endpoint.wire_api, "responses")
+        self.assertFalse(endpoint.stream)
         self.assertEqual(endpoint.reasoning_effort, "high")
         self.assertEqual(endpoint.api_key_env, "OPENAI_API_KEY")
+
+    def test_endpoint_stream_flag(self) -> None:
+        endpoint = LLMEndpointSpec.from_dict(
+            {
+                "provider": "custom",
+                "base_url": "http://example/v1",
+                "wire_api": "chat.completions",
+                "stream": True,
+                "model": "x",
+                "api_key_env": "OPENAI_API_KEY",
+            }
+        )
+        self.assertTrue(endpoint.stream)

     def test_extract_response_text_supports_responses_api_output(self) -> None:
         text = _extract_response_text(
             {
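The hunk exercises an LLMEndpointSpec with a from_dict constructor and a new boolean stream field, but the spec class itself is not part of this excerpt. A minimal sketch consistent with the fields these tests touch (types, defaults, and the frozen dataclass choice beyond what the assertions show are assumptions, not the project's actual definition):

    from dataclasses import dataclass
    from typing import Any, Mapping, Optional


    @dataclass(frozen=True)
    class LLMEndpointSpec:
        # Fields exercised by the tests above; defaults are assumptions.
        provider: str
        base_url: str
        wire_api: str            # e.g. "responses" or "chat.completions"
        model: str
        api_key_env: str
        stream: bool = False     # flag exercised by test_endpoint_stream_flag
        reasoning_effort: Optional[str] = None

        @classmethod
        def from_dict(cls, data: Mapping[str, Any]) -> "LLMEndpointSpec":
            return cls(
                provider=data["provider"],
                base_url=data["base_url"],
                wire_api=data["wire_api"],
                model=data["model"],
                api_key_env=data["api_key_env"],
                stream=bool(data.get("stream", False)),
                reasoning_effort=data.get("reasoning_effort"),
            )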
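Likewise, _extract_response_text is only called in the truncated test above, not defined here. Assuming it walks an OpenAI Responses-API-style payload (an "output" list of message items whose "content" entries of type "output_text" carry the text), one plausible sketch, not the project's actual implementation:

    from typing import Any, Mapping


    def _extract_response_text(payload: Mapping[str, Any]) -> str:
        # Hypothetical sketch: concatenate every "output_text" fragment
        # found in a Responses-API-shaped payload.
        parts: list[str] = []
        for item in payload.get("output", []):
            for chunk in item.get("content", []) or []:
                if chunk.get("type") == "output_text":
                    parts.append(chunk.get("text", ""))
        return "".join(parts)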