Below is the full trace from Llama Stack, captured during the execution of the Python
script above:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:57:33.294 [START] /v1/tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:57:33.325 [END] /v1/tools [StatusCode.OK] (31.43ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/models HTTP/1.1" 200 OK
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:15.930 [START] /v1/models
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "POST /v1/toolgroups HTTP/1.1" 200 OK
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:15.974 [END] /v1/models [StatusCode.OK] (43.72ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "POST /v1/agents HTTP/1.1" 200 OK
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.005 [START] /v1/toolgroups
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.069 [END] /v1/toolgroups [StatusCode.OK] (63.53ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.129 [START] /v1/agents
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ERROR 2025-06-27 09:59:16,030 __main__:237 server: Error executing endpoint route='/v1/tools' method='get'
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╭───────────────────────────────────── Traceback (most recent call last) ─────────────────────────────────────╮
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:235 in route_handler │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 232 │ │ │ │ │ return StreamingResponse(gen, media_type="text/event-stream") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 233 │ │ │ │ else: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 234 │ │ │ │ │ value = func(**kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 235 │ │ │ │ │ return await maybe_await(value) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 236 │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 237 │ │ │ │ logger.exception(f"Error executing endpoint {route=} {method=}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 238 │ │ │ │ raise translate_exception(e) from e │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:172 in maybe_await │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 169 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 170 async def maybe_await(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 171 │ if inspect.iscoroutine(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 172 │ │ return await value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 173 │ return value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 174 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 175 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:56 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ list_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ all_tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 │ │ for toolgroup in toolgroups: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ │ if toolgroup.identifier not in self.toolgroups_to_tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ │ │ │ await self._index_tools(toolgroup) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier]) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ return ListToolsResponse(data=all_tools) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:63 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ _index_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 60 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 61 │ async def _index_tools(self, toolgroup: ToolGroup): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 62 │ │ provider_impl = super().get_provider_impl(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.provider_id) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 63 │ │ tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.mcp_endpoint) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 64 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 65 │ │ # TODO: kill this Tool vs ToolDef distinction │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 66 │ │ tooldefs = tooldefs_response.data │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/mo │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ del_context_protocol.py:48 in list_runtime_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 45 │ │ if mcp_endpoint is None: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 46 │ │ │ raise ValueError("mcp_endpoint is required") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 47 │ │ headers = await self.get_headers_from_request(mcp_endpoint.uri) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 48 │ │ return await list_mcp_tools(mcp_endpoint.uri, headers) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 49 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 50 │ async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ToolInvocationResult: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ │ tool = await self.tool_store.get_tool(tool_name) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:56 in list_mcp_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ListToolDefsResponse: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ async with sse_client_wrapper(endpoint, headers) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ tools_result = await session.list_tools() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ for tool in tools_result.tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ │ parameters = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:39 in sse_client_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 36 @asynccontextmanager │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 37 async def sse_client_wrapper(endpoint: str, headers: dict[str, str]): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 38 │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 39 │ │ async with sse_client(endpoint, headers=headers) as streams: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 40 │ │ │ async with ClientSession(*streams) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 41 │ │ │ │ await session.initialize() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 42 │ │ │ │ yield session │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/mcp/client/sse.py:54 in sse_client │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ read_stream_writer, read_stream = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 52 │ write_stream, write_stream_reader = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 54 │ async with anyio.create_task_group() as tg: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 56 │ │ │ logger.debug(f"Connecting to SSE endpoint: {remove_request_params(url)}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ async with httpx_client_factory(headers=headers, auth=auth) as client: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py:772 in __aexit__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 769 │ │ │ │ │ # added to self._exceptions so it's ok to break exception │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 770 │ │ │ │ │ # chaining and avoid adding a "During handling of above..." │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 771 │ │ │ │ │ # for each nesting level. │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 772 │ │ │ │ │ raise BaseExceptionGroup( │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 773 │ │ │ │ │ │ "unhandled errors in a TaskGroup", self._exceptions │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 774 │ │ │ │ │ ) from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 775 │ │ │ │ elif exc_val: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/tools?toolgroup_id=mcp%3A%3Atest HTTP/1.1" 500 Internal Server Error
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.139 [END] /v1/agents [StatusCode.OK] (10.34ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.207 [START] /v1/tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.311 [END] /v1/tools [StatusCode.OK] (103.72ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.181 [ERROR] Error executing endpoint route='/v1/tools' method='get'
llamastackdistribution-sample-65465df48d-mslk6 llama-stack + Exception Group Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 235, in route_handler
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await maybe_await(value)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 172, in maybe_await
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await value
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 56, in list_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | await self._index_tools(toolgroup)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 63, in _index_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, toolgroup.mcp_endpoint)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py", line 48, in list_runtime_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await list_mcp_tools(mcp_endpoint.uri, headers)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 56, in list_mcp_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client_wrapper(endpoint, headers) as session:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 39, in sse_client_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client(endpoint, headers=headers) as streams:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 54, in sse_client
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with anyio.create_task_group() as tg:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 772, in __aexit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise BaseExceptionGroup(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | exceptiongroup.ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack +-+---------------- 1 ----------------
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 101, in map_httpcore_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | yield
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 394, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | resp = await self._pool.handle_async_request(req)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 256, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc from None
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 236, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await connection.handle_async_request(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 101, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 78, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._connect(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 124, in _connect
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._network_backend.connect_tcp(**kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/auto.py", line 31, in connect_tcp
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await self._backend.connect_tcp(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 113, in connect_tcp
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_exceptions(exc_map):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in __exit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_exceptions.py", line 14, in map_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise to_exc(exc) from exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpcore.ConnectError: All connection attempts failed
llamastackdistribution-sample-65465df48d-mslk6 llama-stack |
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | The above exception was the direct cause of the following exception:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack |
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 58, in sse_client
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with aconnect_sse(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx_sse/_api.py", line 69, in aconnect_sse
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with client.stream(method, url, headers=headers, **kwargs) as response:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1583, in stream
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self.send(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1629, in send
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_auth(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1657, in _send_handling_auth
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_redirects(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1694, in _send_handling_redirects
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_single_request(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1730, in _send_single_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await transport.handle_async_request(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 393, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_httpcore_exceptions():
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in __exit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 118, in map_httpcore_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise mapped_exc(message) from exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpx.ConnectError: All connection attempts failed
llamastackdistribution-sample-65465df48d-mslk6 llama-stack +------------------------------------
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.624 [START] /v1/tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ERROR 2025-06-27 09:59:16,643 __main__:237 server: Error executing endpoint route='/v1/tools' method='get'
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╭───────────────────────────────────── Traceback (most recent call last) ─────────────────────────────────────╮
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:235 in route_handler │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 232 │ │ │ │ │ return StreamingResponse(gen, media_type="text/event-stream") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 233 │ │ │ │ else: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 234 │ │ │ │ │ value = func(**kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 235 │ │ │ │ │ return await maybe_await(value) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 236 │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 237 │ │ │ │ logger.exception(f"Error executing endpoint {route=} {method=}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 238 │ │ │ │ raise translate_exception(e) from e │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:172 in maybe_await │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 169 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 170 async def maybe_await(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 171 │ if inspect.iscoroutine(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 172 │ │ return await value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 173 │ return value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 174 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 175 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:56 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ list_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ all_tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 │ │ for toolgroup in toolgroups: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ │ if toolgroup.identifier not in self.toolgroups_to_tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ │ │ │ await self._index_tools(toolgroup) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier]) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ return ListToolsResponse(data=all_tools) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:63 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ _index_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 60 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 61 │ async def _index_tools(self, toolgroup: ToolGroup): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 62 │ │ provider_impl = super().get_provider_impl(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.provider_id) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 63 │ │ tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.mcp_endpoint) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 64 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 65 │ │ # TODO: kill this Tool vs ToolDef distinction │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 66 │ │ tooldefs = tooldefs_response.data │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/mo │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ del_context_protocol.py:48 in list_runtime_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 45 │ │ if mcp_endpoint is None: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 46 │ │ │ raise ValueError("mcp_endpoint is required") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 47 │ │ headers = await self.get_headers_from_request(mcp_endpoint.uri) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 48 │ │ return await list_mcp_tools(mcp_endpoint.uri, headers) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 49 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 50 │ async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ToolInvocationResult: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ │ tool = await self.tool_store.get_tool(tool_name) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:56 in list_mcp_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ListToolDefsResponse: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ async with sse_client_wrapper(endpoint, headers) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ tools_result = await session.list_tools() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ for tool in tools_result.tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ │ parameters = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:39 in sse_client_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 36 @asynccontextmanager │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 37 async def sse_client_wrapper(endpoint: str, headers: dict[str, str]): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 38 │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 39 │ │ async with sse_client(endpoint, headers=headers) as streams: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 40 │ │ │ async with ClientSession(*streams) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 41 │ │ │ │ await session.initialize() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 42 │ │ │ │ yield session │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/mcp/client/sse.py:54 in sse_client │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ read_stream_writer, read_stream = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 52 │ write_stream, write_stream_reader = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 54 │ async with anyio.create_task_group() as tg: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 56 │ │ │ logger.debug(f"Connecting to SSE endpoint: {remove_request_params(url)}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ async with httpx_client_factory(headers=headers, auth=auth) as client: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py:772 in __aexit__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 769 │ │ │ │ │ # added to self._exceptions so it's ok to break exception │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 770 │ │ │ │ │ # chaining and avoid adding a "During handling of above..." │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 771 │ │ │ │ │ # for each nesting level. │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 772 │ │ │ │ │ raise BaseExceptionGroup( │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 773 │ │ │ │ │ │ "unhandled errors in a TaskGroup", self._exceptions │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 774 │ │ │ │ │ ) from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 775 │ │ │ │ elif exc_val: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/tools?toolgroup_id=mcp%3A%3Atest HTTP/1.1" 500 Internal Server Error
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.842 [END] /v1/tools [StatusCode.OK] (218.32ms)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.765 [ERROR] Error executing endpoint route='/v1/tools' method='get'
llamastackdistribution-sample-65465df48d-mslk6 llama-stack + Exception Group Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 235, in route_handler
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await maybe_await(value)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 172, in maybe_await
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await value
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 56, in list_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | await self._index_tools(toolgroup)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 63, in _index_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, toolgroup.mcp_endpoint)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py", line 48, in list_runtime_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await list_mcp_tools(mcp_endpoint.uri, headers)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 56, in list_mcp_tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client_wrapper(endpoint, headers) as session:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 39, in sse_client_wrapper
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client(endpoint, headers=headers) as streams:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 54, in sse_client
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with anyio.create_task_group() as tg:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 772, in __aexit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise BaseExceptionGroup(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | exceptiongroup.ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack +-+---------------- 1 ----------------
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 101, in map_httpcore_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | yield
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 394, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | resp = await self._pool.handle_async_request(req)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 256, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc from None
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 236, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await connection.handle_async_request(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 101, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 78, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._connect(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 124, in _connect
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._network_backend.connect_tcp(**kwargs)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/auto.py", line 31, in connect_tcp
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await self._backend.connect_tcp(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 113, in connect_tcp
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_exceptions(exc_map):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in __exit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_exceptions.py", line 14, in map_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise to_exc(exc) from exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpcore.ConnectError: All connection attempts failed
llamastackdistribution-sample-65465df48d-mslk6 llama-stack |
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | The above exception was the direct cause of the following exception:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack |
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last):
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 58, in sse_client
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with aconnect_sse(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx_sse/_api.py", line 69, in aconnect_sse
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with client.stream(method, url, headers=headers, **kwargs) as response:
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in __aenter__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1583, in stream
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self.send(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1629, in send
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_auth(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1657, in _send_handling_auth
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_redirects(
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1694, in _send_handling_redirects
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_single_request(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1730, in _send_single_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await transport.handle_async_request(request)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 393, in handle_async_request
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_httpcore_exceptions():
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in __exit__
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 118, in map_httpcore_exceptions
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise mapped_exc(message) from exc
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpx.ConnectError: All connection attempts failed
llamastackdistribution-sample-65465df48d-mslk6 llama-stack +------------------------------------
llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:17.586 [START] /v1/tools
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ERROR 2025-06-27 09:59:17,608 __main__:237 server: Error executing endpoint route='/v1/tools' method='get'
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╭───────────────────────────────────── Traceback (most recent call last) ─────────────────────────────────────╮
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:235 in route_handler │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 232 │ │ │ │ │ return StreamingResponse(gen, media_type="text/event-stream") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 233 │ │ │ │ else: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 234 │ │ │ │ │ value = func(**kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 235 │ │ │ │ │ return await maybe_await(value) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 236 │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 237 │ │ │ │ logger.exception(f"Error executing endpoint {route=} {method=}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 238 │ │ │ │ raise translate_exception(e) from e │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:172 in maybe_await │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 169 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 170 async def maybe_await(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 171 │ if inspect.iscoroutine(value): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 172 │ │ return await value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 173 │ return value │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 174 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 175 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:56 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ list_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ all_tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 │ │ for toolgroup in toolgroups: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ │ if toolgroup.identifier not in self.toolgroups_to_tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ │ │ │ await self._index_tools(toolgroup) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier]) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ return ListToolsResponse(data=all_tools) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:63 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ _index_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 60 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 61 │ async def _index_tools(self, toolgroup: ToolGroup): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 62 │ │ provider_impl = super().get_provider_impl(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.provider_id) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 63 │ │ tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.mcp_endpoint) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 64 │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 65 │ │ # TODO: kill this Tool vs ToolDef distinction │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 66 │ │ tooldefs = tooldefs_response.data │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/mo │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ del_context_protocol.py:48 in list_runtime_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 45 │ │ if mcp_endpoint is None: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 46 │ │ │ raise ValueError("mcp_endpoint is required") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 47 │ │ headers = await self.get_headers_from_request(mcp_endpoint.uri) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 48 │ │ return await list_mcp_tools(mcp_endpoint.uri, headers) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 49 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 50 │ async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ToolInvocationResult: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ │ tool = await self.tool_store.get_tool(tool_name) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:56 in list_mcp_tools │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ListToolDefsResponse: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ tools = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ async with sse_client_wrapper(endpoint, headers) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ tools_result = await session.list_tools() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ for tool in tools_result.tools: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ │ parameters = [] │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:39 in sse_client_wrapper │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 36 @asynccontextmanager │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 37 async def sse_client_wrapper(endpoint: str, headers: dict[str, str]): │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 38 │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 39 │ │ async with sse_client(endpoint, headers=headers) as streams: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 40 │ │ │ async with ClientSession(*streams) as session: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 41 │ │ │ │ await session.initialize() │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 42 │ │ │ │ yield session │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in __aenter__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/mcp/client/sse.py:54 in sse_client │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ read_stream_writer, read_stream = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 52 │ write_stream, write_stream_reader = anyio.create_memory_object_stream(0) │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 54 │ async with anyio.create_task_group() as tg: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ try: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 56 │ │ │ logger.debug(f"Connecting to SSE endpoint: {remove_request_params(url)}") │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ async with httpx_client_factory(headers=headers, auth=auth) as client: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py:772 in __aexit__ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 769 │ │ │ │ │ # added to self._exceptions so it's ok to break exception │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 770 │ │ │ │ │ # chaining and avoid adding a "During handling of above..." │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 771 │ │ │ │ │ # for each nesting level. │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 772 │ │ │ │ │ raise BaseExceptionGroup( │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 773 │ │ │ │ │ │ "unhandled errors in a TaskGroup", self._exceptions │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 774 │ │ │ │ │ ) from None │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 775 │ │ │ │ elif exc_val: │
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception)
llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/tools?toolgroup_id=mcp%3A%3Atest HTTP/1.1" 500 Internal Server Error
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py", line 48, in list_runtime_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await list_mcp_tools(mcp_endpoint.uri, headers) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 56, in list_mcp_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client_wrapper(endpoint, headers) as session: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 39, in sse_client_wrapper llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client(endpoint, headers=headers) as streams: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 54, in sse_client llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with anyio.create_task_group() as tg: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 772, in aexit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise BaseExceptionGroup( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | exceptiongroup.ExceptionGroup: unhandled errors in a 
TaskGroup (1 sub-exception) llamastackdistribution-sample-65465df48d-mslk6 llama-stack +-+---------------- 1 ---------------- llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 101, in map_httpcore_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | yield llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 394, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | resp = await self._pool.handle_async_request(req) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 256, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc from None llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 236, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await connection.handle_async_request( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 101, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 78, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._connect(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 124, in _connect 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._network_backend.connect_tcp(**kwargs) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/auto.py", line 31, in connect_tcp llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await self._backend.connect_tcp( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 113, in connect_tcp llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_exceptions(exc_map): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in exit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_exceptions.py", line 14, in map_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise to_exc(exc) from exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpcore.ConnectError: All connection attempts failed llamastackdistribution-sample-65465df48d-mslk6 llama-stack | llamastackdistribution-sample-65465df48d-mslk6 llama-stack | The above exception was the direct cause of the following exception: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 58, in sse_client llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with aconnect_sse( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | 
return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx_sse/_api.py", line 69, in aconnect_sse llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with client.stream(method, url, headers=headers, **kwargs) as response: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1583, in stream llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self.send( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1629, in send llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_auth( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1657, in _send_handling_auth llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_redirects( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1694, in _send_handling_redirects llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_single_request(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1730, in _send_single_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await transport.handle_async_request(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 393, 
in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_httpcore_exceptions(): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in exit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 118, in map_httpcore_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise mapped_exc(message) from exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpx.ConnectError: All connection attempts failed llamastackdistribution-sample-65465df48d-mslk6 llama-stack +------------------------------------ llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.624 [START] /v1/tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack ERROR 2025-06-27 09:59:16,643 main:237 server: Error executing endpoint route='/v1/tools' method='get' llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╭───────────────────────────────────── Traceback (most recent call last) ─────────────────────────────────────╮ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:235 in route_handler │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 232 │ │ │ │ │ return StreamingResponse(gen, media_type="text/event-stream") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 233 │ │ │ │ else: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 234 │ │ │ │ │ value = func(**kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 235 │ │ │ │ │ return await maybe_await(value) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 236 │ │ │ except Exception as e: 
│ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 237 │ │ │ │ logger.exception(f"Error executing endpoint {route=} {method=}") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 238 │ │ │ │ raise translate_exception(e) from e │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:172 in maybe_await │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 169 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 170 async def maybe_await(value): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 171 │ if inspect.iscoroutine(value): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 172 │ │ return await value │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 173 │ return value │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 174 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 175 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ 
span.set_attribute("output", serialize_value(result)) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:56 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ list_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ all_tools = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 │ │ for toolgroup in toolgroups: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ │ if toolgroup.identifier not in self.toolgroups_to_tools: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ │ │ │ await self._index_tools(toolgroup) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier]) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ return ListToolsResponse(data=all_tools) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:63 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ _index_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 60 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 61 │ async def _index_tools(self, toolgroup: ToolGroup): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 62 │ │ provider_impl = 
super().get_provider_impl(toolgroup.identifier, │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.provider_id) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 63 │ │ tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.mcp_endpoint) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 64 │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 65 │ │ # TODO: kill this Tool vs ToolDef distinction │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 66 │ │ tooldefs = tooldefs_response.data │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 
/usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/mo │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ del_context_protocol.py:48 in list_runtime_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 45 │ │ if mcp_endpoint is None: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 46 │ │ │ raise ValueError("mcp_endpoint is required") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 47 │ │ headers = await self.get_headers_from_request(mcp_endpoint.uri) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 48 │ │ return await list_mcp_tools(mcp_endpoint.uri, headers) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 49 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 50 │ async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ToolInvocationResult: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ │ tool = await self.tool_store.get_tool(tool_name) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:56 in list_mcp_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ListToolDefsResponse: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ tools = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ async with sse_client_wrapper(endpoint, headers) as session: │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ tools_result = await session.list_tools() │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ for tool in tools_result.tools: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ │ parameters = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in aenter │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:39 in sse_client_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 36 @asynccontextmanager │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 37 async def sse_client_wrapper(endpoint: str, headers: dict[str, str]): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 38 │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 39 │ │ async with sse_client(endpoint, headers=headers) as streams: │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 40 │ │ │ async with ClientSession(*streams) as session: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 41 │ │ │ │ await session.initialize() │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 42 │ │ │ │ yield session │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in aenter │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/mcp/client/sse.py:54 in sse_client │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ read_stream_writer, read_stream = anyio.create_memory_object_stream(0) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 52 │ write_stream, write_stream_reader = anyio.create_memory_object_stream(0) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 54 │ async with anyio.create_task_group() as tg: │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 56 │ │ │ logger.debug(f"Connecting to SSE endpoint: {remove_request_params(url)}") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ async with httpx_client_factory(headers=headers, auth=auth) as client: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py:772 in aexit │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 769 │ │ │ │ │ # added to self._exceptions so it's ok to break exception │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 770 │ │ │ │ │ # chaining and avoid adding a "During handling of above..." │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 771 │ │ │ │ │ # for each nesting level. 
│ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 772 │ │ │ │ │ raise BaseExceptionGroup( │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 773 │ │ │ │ │ │ "unhandled errors in a TaskGroup", self._exceptions │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 774 │ │ │ │ │ ) from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 775 │ │ │ │ elif exc_val: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ llamastackdistribution-sample-65465df48d-mslk6 llama-stack ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception) llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/tools?toolgroup_id=mcp%3A%3Atest HTTP/1.1" 500 Internal Server Error llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.842 [END] /v1/tools [StatusCode.OK] (218.32ms) llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:16.765 [ERROR] Error executing endpoint route='/v1/tools' method='get' llamastackdistribution-sample-65465df48d-mslk6 llama-stack + Exception Group Traceback (most recent call last): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 235, in route_handler llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await maybe_await(value) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py", line 172, in maybe_await llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await value llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 56, in list_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | await self._index_tools(toolgroup) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py", line 63, in _index_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, toolgroup.mcp_endpoint) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py", line 103, in async_wrapper llamastackdistribution-sample-65465df48d-mslk6 llama-stack | result = await method(self, *args, **kwargs) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py", line 48, in list_runtime_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await list_mcp_tools(mcp_endpoint.uri, headers) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 56, in list_mcp_tools llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client_wrapper(endpoint, headers) as session: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File 
"/usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py", line 39, in sse_client_wrapper llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with sse_client(endpoint, headers=headers) as streams: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 54, in sse_client llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with anyio.create_task_group() as tg: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 772, in aexit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise BaseExceptionGroup( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | exceptiongroup.ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception) llamastackdistribution-sample-65465df48d-mslk6 llama-stack +-+---------------- 1 ---------------- llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 101, in map_httpcore_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | yield llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 394, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | resp = await self._pool.handle_async_request(req) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 256, in handle_async_request 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc from None llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection_pool.py", line 236, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await connection.handle_async_request( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 101, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 78, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._connect(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_async/connection.py", line 124, in _connect llamastackdistribution-sample-65465df48d-mslk6 llama-stack | stream = await self._network_backend.connect_tcp(**kwargs) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/auto.py", line 31, in connect_tcp llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await self._backend.connect_tcp( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpcore/_backends/anyio.py", line 113, in connect_tcp llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_exceptions(exc_map): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in exit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File 
"/usr/local/lib/python3.10/site-packages/httpcore/_exceptions.py", line 14, in map_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise to_exc(exc) from exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpcore.ConnectError: All connection attempts failed llamastackdistribution-sample-65465df48d-mslk6 llama-stack | llamastackdistribution-sample-65465df48d-mslk6 llama-stack | The above exception was the direct cause of the following exception: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | llamastackdistribution-sample-65465df48d-mslk6 llama-stack | Traceback (most recent call last): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/mcp/client/sse.py", line 58, in sse_client llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with aconnect_sse( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx_sse/_api.py", line 69, in aconnect_sse llamastackdistribution-sample-65465df48d-mslk6 llama-stack | async with client.stream(method, url, headers=headers, **kwargs) as response: llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 199, in aenter llamastackdistribution-sample-65465df48d-mslk6 llama-stack | return await anext(self.gen) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1583, in stream llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self.send( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1629, in send 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_auth( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1657, in _send_handling_auth llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_handling_redirects( llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1694, in _send_handling_redirects llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await self._send_single_request(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_client.py", line 1730, in _send_single_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | response = await transport.handle_async_request(request) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 393, in handle_async_request llamastackdistribution-sample-65465df48d-mslk6 llama-stack | with map_httpcore_exceptions(): llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/contextlib.py", line 153, in exit llamastackdistribution-sample-65465df48d-mslk6 llama-stack | self.gen.throw(typ, value, traceback) llamastackdistribution-sample-65465df48d-mslk6 llama-stack | File "/usr/local/lib/python3.10/site-packages/httpx/_transports/default.py", line 118, in map_httpcore_exceptions llamastackdistribution-sample-65465df48d-mslk6 llama-stack | raise mapped_exc(message) from exc llamastackdistribution-sample-65465df48d-mslk6 llama-stack | httpx.ConnectError: All connection attempts failed llamastackdistribution-sample-65465df48d-mslk6 llama-stack +------------------------------------ llamastackdistribution-sample-65465df48d-mslk6 llama-stack 09:59:17.586 [START] /v1/tools 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack ERROR 2025-06-27 09:59:17,608 main:237 server: Error executing endpoint route='/v1/tools' method='get' llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╭───────────────────────────────────── Traceback (most recent call last) ─────────────────────────────────────╮ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:235 in route_handler │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 232 │ │ │ │ │ return StreamingResponse(gen, media_type="text/event-stream") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 233 │ │ │ │ else: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 234 │ │ │ │ │ value = func(**kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 235 │ │ │ │ │ return await maybe_await(value) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 236 │ │ │ except Exception as e: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 237 │ │ │ │ logger.exception(f"Error executing endpoint {route=} {method=}") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 238 │ │ │ │ raise translate_exception(e) from e │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/server/server.py:172 in maybe_await │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 169 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 170 async def maybe_await(value): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 171 │ if inspect.iscoroutine(value): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 172 │ │ return await value │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 173 │ return value │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 174 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 175 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:56 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ list_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ all_tools = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 │ │ for toolgroup in toolgroups: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ │ if toolgroup.identifier not in 
self.toolgroups_to_tools: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ │ │ │ await self._index_tools(toolgroup) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier]) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ return ListToolsResponse(data=all_tools) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/distribution/routing_tables/toolgroups.py:63 in │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ _index_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 60 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 61 │ async def _index_tools(self, toolgroup: ToolGroup): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 62 │ │ provider_impl = super().get_provider_impl(toolgroup.identifier, │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.provider_id) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 63 │ │ tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ toolgroup.mcp_endpoint) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 64 │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 65 │ │ # TODO: kill this Tool vs ToolDef distinction │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 66 │ │ tooldefs = tooldefs_response.data │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/telemetry/trace_protocol.py:103 in │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ async_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 100 │ │ │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 101 │ │ │ with tracing.span(f"{class_name}.{method_name}", span_attributes) as span: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 102 │ │ │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 103 │ │ │ │ │ result = await method(self, *args, **kwargs) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 104 │ │ │ │ │ span.set_attribute("output", serialize_value(result)) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 105 │ │ │ │ │ return result │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 106 │ │ │ │ except Exception as e: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/remote/tool_runtime/model_context_protocol/mo │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ del_context_protocol.py:48 in list_runtime_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 45 │ │ if mcp_endpoint is None: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 46 │ │ │ raise ValueError("mcp_endpoint is required") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 47 │ │ headers = await self.get_headers_from_request(mcp_endpoint.uri) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 48 │ │ return await list_mcp_tools(mcp_endpoint.uri, headers) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 49 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 50 │ async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ToolInvocationResult: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ │ tool = await self.tool_store.get_tool(tool_name) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:56 in list_mcp_tools │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 54 async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ListToolDefsResponse: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ tools = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 56 │ async with sse_client_wrapper(endpoint, headers) as session: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ tools_result = await session.list_tools() │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 58 │ │ for tool in tools_result.tools: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 59 │ │ │ parameters = [] │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in aenter │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/llama_stack/providers/utils/tools/mcp.py:39 in sse_client_wrapper │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 36 @asynccontextmanager │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 37 async def sse_client_wrapper(endpoint: str, headers: dict[str, str]): │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 38 │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 39 │ │ async with sse_client(endpoint, headers=headers) as streams: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 40 │ │ │ async with ClientSession(*streams) as session: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 41 │ │ │ │ await session.initialize() │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 42 │ │ │ │ yield session │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/contextlib.py:199 in aenter │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 196 │ │ # they are only needed for recreation, which is not possible anymore │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 197 │ │ del self.args, self.kwds, self.func │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 198 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 199 │ │ │ return await anext(self.gen) │ 
llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 200 │ │ except StopAsyncIteration: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 201 │ │ │ raise RuntimeError("generator didn't yield") from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 202 │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/mcp/client/sse.py:54 in sse_client │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 51 │ read_stream_writer, read_stream = anyio.create_memory_object_stream(0) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 52 │ write_stream, write_stream_reader = anyio.create_memory_object_stream(0) │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 53 │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 54 │ async with anyio.create_task_group() as tg: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 55 │ │ try: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 56 │ │ │ logger.debug(f"Connecting to SSE endpoint: {remove_request_params(url)}") │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 57 │ │ │ async with httpx_client_factory(headers=headers, auth=auth) as client: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ /usr/local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py:772 in aexit │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 769 │ │ │ │ │ # added to self._exceptions so it's ok to break exception │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 770 │ │ │ │ │ # chaining and avoid adding a "During handling of above..." 
│ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 771 │ │ │ │ │ # for each nesting level. │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ ❱ 772 │ │ │ │ │ raise BaseExceptionGroup( │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 773 │ │ │ │ │ │ "unhandled errors in a TaskGroup", self._exceptions │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 774 │ │ │ │ │ ) from None │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack │ 775 │ │ │ │ elif exc_val: │ llamastackdistribution-sample-65465df48d-mslk6 llama-stack ╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ llamastackdistribution-sample-65465df48d-mslk6 llama-stack ExceptionGroup: unhandled errors in a TaskGroup (1 sub-exception) llamastackdistribution-sample-65465df48d-mslk6 llama-stack INFO: 127.0.0.1:57472 - "GET /v1/tools?toolgroup_id=mcp%3A%3Atest HTTP/1.1" 500 Internal Server Error