@@ -943,10 +943,34 @@ The streamable HTTP transport supports:
 
 ### Mounting to an Existing ASGI Server
 
-> **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
-
 By default, SSE servers are mounted at `/sse` and Streamable HTTP servers are mounted at `/mcp`. You can customize these paths using the methods described below.
 
+#### Streamable HTTP servers
+
+The following example shows how to use `streamable_http_app()`, a method that returns a `Starlette` application object.
+You can then append additional routes to that application as needed.
+
+```python
+mcp = FastMCP("My App")
+
+app = mcp.streamable_http_app()
+# Additional non-MCP routes can be added like so:
+# from starlette.routing import Route
+# app.router.routes.append(Route("/", endpoint=other_route_function))
+```
+
+To customize the route from the default of "/mcp", either specify the `streamable_http_path` option for the `FastMCP` constructor,
+or set the `FASTMCP_STREAMABLE_HTTP_PATH` environment variable.
+
+Note that in Starlette and FastAPI (which is based on Starlette), the "/mcp" route will redirect to "/mcp/",
+so you may need to use "/mcp/" when pointing MCP clients at your servers.
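+
+For example, the path can be overridden when constructing the server. The following is a minimal sketch of the constructor option described above; the "/api/mcp" value is purely illustrative:
+
+```python
+from mcp.server.fastmcp import FastMCP
+
+# Illustrative custom path: serve the Streamable HTTP endpoint at /api/mcp instead of the default /mcp
+mcp = FastMCP("My App", streamable_http_path="/api/mcp")
+app = mcp.streamable_http_app()
+# Clients would then connect to http://<host>/api/mcp/ (note the trailing slash, per the redirect behavior above).
+```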
+
+For more information on mounting applications in Starlette, see the [Starlette documentation](https://www.starlette.io/routing/#submounting-routes).
+
+#### SSE servers
+
+> **Note**: SSE transport is being superseded by [Streamable HTTP transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http).
+
 You can mount the SSE server to an existing ASGI server using the `sse_app` method. This allows you to integrate the SSE server with other ASGI applications.
 
 ```python
@@ -1013,17 +1037,44 @@ For more information on mounting applications in Starlette, see the [Starlette d
 
 For more control, you can use the low-level server implementation directly. This gives you full access to the protocol and allows you to customize every aspect of your server, including lifecycle management through the lifespan API:
 
+<!-- snippet-source examples/snippets/servers/lowlevel/lifespan.py -->
 ```python
-from contextlib import asynccontextmanager
+"""
+Run from the repository root:
+uv run examples/snippets/servers/lowlevel/lifespan.py
+"""
+
 from collections.abc import AsyncIterator
+from contextlib import asynccontextmanager
+
+import mcp.server.stdio
+import mcp.types as types
+from mcp.server.lowlevel import NotificationOptions, Server
+from mcp.server.models import InitializationOptions
+
+
+# Mock database class for example
+class Database:
+    """Mock database class for example."""
 
-from fake_database import Database  # Replace with your actual DB type
+    @classmethod
+    async def connect(cls) -> "Database":
+        """Connect to database."""
+        print("Database connected")
+        return cls()
 
-from mcp.server import Server
+    async def disconnect(self) -> None:
+        """Disconnect from database."""
+        print("Database disconnected")
+
+    async def query(self, query_str: str) -> list[dict[str, str]]:
+        """Execute a query."""
+        # Simulate database query
+        return [{"id": "1", "name": "Example", "query": query_str}]
 
 
 @asynccontextmanager
-async def server_lifespan(server: Server) -> AsyncIterator[dict]:
+async def server_lifespan(_server: Server) -> AsyncIterator[dict]:
     """Manage server startup and shutdown lifecycle."""
     # Initialize resources on startup
     db = await Database.connect()
@@ -1038,21 +1089,79 @@ async def server_lifespan(server: Server) -> AsyncIterator[dict]:
 server = Server("example-server", lifespan=server_lifespan)
 
 
-# Access lifespan context in handlers
+@server.list_tools()
+async def handle_list_tools() -> list[types.Tool]:
+    """List available tools."""
+    return [
+        types.Tool(
+            name="query_db",
+            description="Query the database",
+            inputSchema={
+                "type": "object",
+                "properties": {"query": {"type": "string", "description": "SQL query to execute"}},
+                "required": ["query"],
+            },
+        )
+    ]
+
+
 @server.call_tool()
-async def query_db(name: str, arguments: dict) -> list:
+async def query_db(name: str, arguments: dict) -> list[types.TextContent]:
+    """Handle database query tool call."""
+    if name != "query_db":
+        raise ValueError(f"Unknown tool: {name}")
+
+    # Access lifespan context
     ctx = server.request_context
     db = ctx.lifespan_context["db"]
-    return await db.query(arguments["query"])
+
+    # Execute query
+    results = await db.query(arguments["query"])
+
+    return [types.TextContent(type="text", text=f"Query results: {results}")]
+
+
+async def run():
+    """Run the server with lifespan management."""
+    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+        await server.run(
+            read_stream,
+            write_stream,
+            InitializationOptions(
+                server_name="example-server",
+                server_version="0.1.0",
+                capabilities=server.get_capabilities(
+                    notification_options=NotificationOptions(),
+                    experimental_capabilities={},
+                ),
+            ),
+        )
+
+
+if __name__ == "__main__":
+    import asyncio
+
+    asyncio.run(run())
 ```
 
+_Full example: [examples/snippets/servers/lowlevel/lifespan.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/lifespan.py)_
+<!-- /snippet-source -->
+
 The lifespan API provides:
 
 - A way to initialize resources when the server starts and clean them up when it stops
 - Access to initialized resources through the request context in handlers
 - Type-safe context passing between lifespan and request handlers
 
+<!-- snippet-source examples/snippets/servers/lowlevel/basic.py -->
 ```python
+"""
+Run from the repository root:
+uv run examples/snippets/servers/lowlevel/basic.py
+"""
+
+import asyncio
+
 import mcp.server.stdio
 import mcp.types as types
 from mcp.server.lowlevel import NotificationOptions, Server
@@ -1064,38 +1173,37 @@ server = Server("example-server")
 
 @server.list_prompts()
 async def handle_list_prompts() -> list[types.Prompt]:
+    """List available prompts."""
     return [
         types.Prompt(
             name="example-prompt",
             description="An example prompt template",
-            arguments=[
-                types.PromptArgument(
-                    name="arg1", description="Example argument", required=True
-                )
-            ],
+            arguments=[types.PromptArgument(name="arg1", description="Example argument", required=True)],
         )
     ]
 
 
 @server.get_prompt()
-async def handle_get_prompt(
-    name: str, arguments: dict[str, str] | None
-) -> types.GetPromptResult:
+async def handle_get_prompt(name: str, arguments: dict[str, str] | None) -> types.GetPromptResult:
+    """Get a specific prompt by name."""
     if name != "example-prompt":
         raise ValueError(f"Unknown prompt: {name}")
 
+    arg1_value = (arguments or {}).get("arg1", "default")
+
     return types.GetPromptResult(
         description="Example prompt",
         messages=[
             types.PromptMessage(
                 role="user",
-                content=types.TextContent(type="text", text="Example prompt text"),
+                content=types.TextContent(type="text", text=f"Example prompt text with argument: {arg1_value}"),
             )
         ],
     )
 
 
 async def run():
+    """Run the basic low-level server."""
     async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
         await server.run(
             read_stream,
@@ -1112,67 +1220,108 @@ async def run():
 
 
 if __name__ == "__main__":
-    import asyncio
-
     asyncio.run(run())
 ```
 
+_Full example: [examples/snippets/servers/lowlevel/basic.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/basic.py)_
+<!-- /snippet-source -->
+
 Caution: The `uv run mcp run` and `uv run mcp dev` tools don't support the low-level server.
 
 #### Structured Output Support
 
 The low-level server supports structured output for tools, allowing you to return both human-readable content and machine-readable structured data. Tools can define an `outputSchema` to validate their structured output:
 
+<!-- snippet-source examples/snippets/servers/lowlevel/structured_output.py -->
 ```python
-from types import Any
+"""
+Run from the repository root:
+uv run examples/snippets/servers/lowlevel/structured_output.py
+"""
+
+import asyncio
+from typing import Any
 
+import mcp.server.stdio
 import mcp.types as types
-from mcp.server.lowlevel import Server
+from mcp.server.lowlevel import NotificationOptions, Server
+from mcp.server.models import InitializationOptions
 
 server = Server("example-server")
 
 
 @server.list_tools()
 async def list_tools() -> list[types.Tool]:
+    """List available tools with structured output schemas."""
     return [
         types.Tool(
-            name="calculate",
-            description="Perform mathematical calculations",
+            name="get_weather",
+            description="Get current weather for a city",
             inputSchema={
                 "type": "object",
-                "properties": {
-                    "expression": {"type": "string", "description": "Math expression"}
-                },
-                "required": ["expression"],
+                "properties": {"city": {"type": "string", "description": "City name"}},
+                "required": ["city"],
             },
             outputSchema={
                 "type": "object",
                 "properties": {
-                    "result": {"type": "number"},
-                    "expression": {"type": "string"},
+                    "temperature": {"type": "number", "description": "Temperature in Celsius"},
+                    "condition": {"type": "string", "description": "Weather condition"},
+                    "humidity": {"type": "number", "description": "Humidity percentage"},
+                    "city": {"type": "string", "description": "City name"},
                 },
-                "required": ["result", "expression"],
+                "required": ["temperature", "condition", "humidity", "city"],
             },
         )
     ]
 
 
 @server.call_tool()
 async def call_tool(name: str, arguments: dict[str, Any]) -> dict[str, Any]:
-    if name == "calculate":
-        expression = arguments["expression"]
-        try:
-            result = eval(expression)  # Use a safe math parser
-            structured = {"result": result, "expression": expression}
-
-            # low-level server will validate structured output against the tool's
-            # output schema, and automatically serialize it into a TextContent block
-            # for backwards compatibility with pre-2025-06-18 clients.
-            return structured
-        except Exception as e:
-            raise ValueError(f"Calculation error: {str(e)}")
+    """Handle tool calls with structured output."""
+    if name == "get_weather":
+        city = arguments["city"]
+
+        # Simulated weather data - in production, call a weather API
+        weather_data = {
+            "temperature": 22.5,
+            "condition": "partly cloudy",
+            "humidity": 65,
+            "city": city,  # Include the requested city
+        }
+
+        # low-level server will validate structured output against the tool's
+        # output schema, and additionally serialize it into a TextContent block
+        # for backwards compatibility with pre-2025-06-18 clients.
+        return weather_data
+    else:
+        raise ValueError(f"Unknown tool: {name}")
+
+
+async def run():
+    """Run the structured output server."""
+    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+        await server.run(
+            read_stream,
+            write_stream,
+            InitializationOptions(
+                server_name="structured-output-example",
+                server_version="0.1.0",
+                capabilities=server.get_capabilities(
+                    notification_options=NotificationOptions(),
+                    experimental_capabilities={},
+                ),
+            ),
+        )
+
+
+if __name__ == "__main__":
+    asyncio.run(run())
 ```
 
+_Full example: [examples/snippets/servers/lowlevel/structured_output.py](https://github.com/modelcontextprotocol/python-sdk/blob/main/examples/snippets/servers/lowlevel/structured_output.py)_
+<!-- /snippet-source -->
+
 Tools can return data in three ways:
 
 1. **Content only**: Return a list of content blocks (default behavior before spec revision 2025-06-18)