import os
|
|
from dotenv import load_dotenv
|
|
from starlette.applications import Starlette
|
|
from starlette.routing import Route, WebSocketRoute, Mount
|
|
from starlette.staticfiles import StaticFiles
|
|
from starlette.templating import Jinja2Templates
|
|
from starlette.websockets import WebSocket, WebSocketDisconnect
|
|
from langchain_ollama import ChatOllama
|
|
|
|
# Load environment variables (e.g. Ollama host/credentials) from a local .env file.
load_dotenv()

# Initialize templates: Jinja2 templates served from the ./templates directory.
templates = Jinja2Templates(directory="templates")

# Chat model backing the websocket chat; reasoning=False suppresses the model's
# chain-of-thought output. Use ChatOpenAI or any other chat model here instead.
llm = ChatOllama(model="qwen3:4b", reasoning=False)

# Conversation history seeded with the system prompt.
# NOTE(review): module-level and mutated by the websocket handler — history is
# shared across ALL connected clients; confirm that is intended.
messages = [
    ("system", "You are a helpful assistant"),
]
|
|
|
|
async def homepage(request):
    """Render the chat UI (templates/index.html) for GET /.

    Uses the request-first call form; the older
    ``TemplateResponse("index.html", {"request": request})`` signature is
    deprecated in recent Starlette releases and emits a warning.
    """
    return templates.TemplateResponse(request, "index.html")
|
|
|
|
async def websocket_endpoint(websocket: WebSocket):
    """Chat over a WebSocket: each text frame is one user turn, answered
    as a stream of text chunks.

    Keeps a per-connection copy of the conversation history so concurrent
    clients do not read or mutate each other's conversation (appending to
    the module-level ``messages`` list would be shared by every connection).
    """
    await websocket.accept()
    # Per-connection history, seeded with the shared system prompt.
    history = list(messages)
    try:
        while True:
            user_text = await websocket.receive_text()
            # Tag the role explicitly rather than appending a bare string.
            history.append(("human", user_text))

            # Stream the response without blocking the event loop:
            # llm.stream() is synchronous and would stall every other
            # connection; astream() yields chunks asynchronously.
            reply_parts = []
            async for chunk in llm.astream(history):
                text = chunk.text()
                reply_parts.append(text)
                await websocket.send_text(text)

            # Record the assistant turn so follow-up questions keep context.
            history.append(("ai", "".join(reply_parts)))

    except WebSocketDisconnect:
        print("Client disconnected")
|
|
|
|
# URL table: chat page, the chat websocket, and static assets (css/js).
routes = [
    Route("/", homepage),
    WebSocketRoute("/ws", websocket_endpoint),
    Mount("/static", StaticFiles(directory="static"), name="static"),
]

# ASGI application. debug=True returns tracebacks in responses — disable in production.
app = Starlette(debug=True, routes=routes)
|
|
|
|
# Allow running directly (`python <this file>`): start a dev server
# listening on all interfaces at port 8000.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)