"""Minimal LangGraph chat graph: one model node with in-memory checkpointing."""
from operator import add
from os import getenv
from typing import Annotated, TypedDict

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, StateGraph
# Load OPENROUTER_API_KEY / OPENROUTER_BASE_URL from a local .env file
# before the ChatOpenAI client below reads them via getenv().
load_dotenv()
class State(TypedDict):
    """Graph state: the running conversation history.

    `messages` is annotated with the `operator.add` reducer so LangGraph
    *appends* each node's returned messages to the existing list. Without a
    reducer, a plain `list` channel is overwritten on every node return,
    which would discard the prior history kept by the checkpointer and make
    call_model's last-20-messages trim pointless.
    """

    # Reducer semantics: new_state = old_messages + returned_messages.
    messages: Annotated[list, add]
# Chat model routed through OpenRouter's OpenAI-compatible endpoint.
# Uses the current langchain-openai parameter names (`api_key`, `base_url`,
# `model`) instead of the legacy aliases (`openai_api_key`,
# `openai_api_base`, `model_name`).
# NOTE(review): getenv() returns None if the variables are unset — presumably
# .env always defines both; verify in deployment.
llm = ChatOpenAI(
    api_key=getenv("OPENROUTER_API_KEY"),
    base_url=getenv("OPENROUTER_BASE_URL"),
    model="qwen/qwen3-235b-a22b-07-25",
)
def call_model(state: State):
    """Run the LLM over a bounded window of the conversation.

    Only the 20 most recent messages are sent to the model, keeping the
    prompt size in check as history grows; the model's reply is returned
    as a single-element message list for the graph to merge into state.
    """
    recent_window = state["messages"][-20:]
    reply = llm.invoke(recent_window)
    return {"messages": [reply]}
# Assemble the graph: a single "model" node reached directly from START,
# compiled with an in-memory checkpointer so per-thread conversation
# state survives between invocations.
memory = MemorySaver()

workflow = StateGraph(State)
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

app_graph = workflow.compile(checkpointer=memory)