Chat History and multi-model

Sebarocks 2025-07-29 23:42:15 -04:00
parent a9ffb48b4b
commit 44f391ef1e
13 changed files with 1072 additions and 839 deletions


@@ -1,29 +1,17 @@
from typing import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import MemorySaver
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage
from os import getenv
from dotenv import load_dotenv

load_dotenv()


class State(TypedDict):
    messages: list


def get_llm(provider: str):
    """Return a LangChain chat model for the requested provider."""
    return ChatOpenAI(
        openai_api_key=getenv("OPENROUTER_API_KEY"),
        openai_api_base=getenv("OPENROUTER_BASE_URL"),
        model_name=provider,
    )


# Default chat model; built through get_llm so another provider can be swapped in.
llm = get_llm("qwen/qwen3-235b-a22b-07-25")


def call_model(state: State):
    # Simple memory trim: cap the prompt at the last 20 messages.
    trimmed = state["messages"][-20:]
    response = llm.invoke(trimmed)
    # `messages` has no reducer, so the returned list replaces the checkpointed
    # state; return the trimmed history along with the reply to preserve it.
    return {"messages": trimmed + [response]}


workflow = StateGraph(State)
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")
workflow.add_edge("model", END)

# MemorySaver checkpoints state per thread_id, giving each chat its own history.
memory = MemorySaver()
app_graph = workflow.compile(checkpointer=memory)


def get_messages(chats, chat_id):
    """Rebuild LangChain messages from stored {"role", "content"} dicts."""
    return [HumanMessage(content=m["content"]) if m["role"] == "human" else AIMessage(content=m["content"])
            for m in chats[chat_id]]
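
For reference, driving the compiled graph might look like the sketch below. The chats structure, the chat ID, and the alternate model identifier are illustrative assumptions, not part of this commit.

# Hypothetical usage sketch (not in the commit): assumes `chats` maps a chat
# ID to a list of {"role": ..., "content": ...} dicts and that
# OPENROUTER_API_KEY / OPENROUTER_BASE_URL are set in the environment.
chats = {"chat-1": [{"role": "human", "content": "Hello!"}]}

# The chat ID doubles as the checkpointer thread_id, so MemorySaver keeps a
# separate history per conversation.
config = {"configurable": {"thread_id": "chat-1"}}
result = app_graph.invoke({"messages": get_messages(chats, "chat-1")}, config)
print(result["messages"][-1].content)

# Multi-model: get_llm can build a model for any OpenRouter identifier
# (the one below is a placeholder, not a real model name).
other_llm = get_llm("some-provider/some-model")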