LangChain Cheatsheet
Building the LangChain
Installing Required Packages
!pip install -qU \
    langchain-core==0.3.33 \
    langchain-openai==0.3.3 \
    langchain-community==0.3.16
Initializing
With Azure
import os
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
from langchain_openai import AzureChatOpenAI
llm = AzureChatOpenAI(
    azure_endpoint="https://models.inference.ai.azure.com",
    azure_deployment="gpt-4o-mini",
    openai_api_version="2025-03-01-preview",
    model_name="gpt-4o-mini",
    temperature=0,
    api_key=GITHUB_TOKEN,
)
With OpenAI
import os
from getpass import getpass
from langchain_openai import ChatOpenAI
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") or getpass(
"Enter OpenAI API Key: " # save the API key in the environment (change the "OPEN_API_KEY" as in the environment) or you'll be prompted to input the API key
)
openai_model = "gpt-4o-mini" # change the model as required
# temperature=0.0 for deterministic, accurate responses
llm = ChatOpenAI(temperature=0.0, model=openai_model)
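A quick sanity check that the model is reachable; the prompt itself is just an illustrative placeholder:
llm.invoke("Say hello in one word.").content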
openai_model = "gpt-4o-mini" # change the model as requiredPreparing the Prompt
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.prompts import ChatPromptTemplate
# Defining the system prompt (how the AI should act)
system_prompt = SystemMessagePromptTemplate.from_template(
    "You are an AI assistant called {name} that helps generate article titles.",
    input_variables=["name"]
)
# the user prompt is provided at runtime; here the only dynamic
# input is the article
user_prompt = HumanMessagePromptTemplate.from_template(
"""You are tasked with creating a name for a article.
The article is here for you to examine {article}
The name should be based of the context of the article.
Be creative, but make sure the names are clear, catchy,
and relevant to the theme of the article.
Only output the article name, no other explanation or
text can be provided.""",
input_variables=["article"]
)
first_prompt = ChatPromptTemplate.from_messages([system_prompt, user_prompt])
Use the following command to print user_prompt.
print(user_prompt.format(article="TEST STRING").content)
Use the following command to print first_prompt.
print(first_prompt.format(name="NAME", article="TEST STRING"))
Chaining and Invoking without Pydantic
Chaining the Prompt and the LLM
chain = (
    {"article": lambda x: x["article"],
     "name": lambda x: x["name"]}  # map the input dict to the prompt variables
    | first_prompt  # format the prompt
    | llm  # generate the response
    | {"article_title": lambda x: x.content}  # retrieve the output text
)
Invoking the LLM
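The chain expects an article variable that is never defined in this cheatsheet; here is a purely illustrative placeholder to make the cell runnable:
article = """LangChain is a framework for developing applications
powered by large language models, built around composable prompts,
models, and output parsers."""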
article_title_msg = chain.invoke({
    "article": article,
    "name": "Joe"  # inputting the required variables
})
article_title_msg
Chaining and Invoking with Pydantic
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.prompts import ChatPromptTemplate
# Defining the system prompt (how the AI should act)
system_prompt = SystemMessagePromptTemplate.from_template(
"You are an AI assistant that helps generate article titles."
)
user_prompt = HumanMessagePromptTemplate.from_template(
"""You are tasked with creating a new paragraph for the
article. The article is here for you to examine:
---
{article}
---
Choose one paragraph to review and edit. During your edit
ensure you provide constructive feedback to the user so they
can learn where to improve their own writing.""",
input_variables=["article"]
)
second_prompt = ChatPromptTemplate.from_messages([system_prompt, user_prompt])
Creating a Pydantic Object
Create a Pydantic object describing the required output format.
from pydantic import BaseModel, Field
class Paragraph(BaseModel):
    original_paragraph: str = Field(description="The original paragraph")
    edited_paragraph: str = Field(description="The improved, edited paragraph")
    feedback: str = Field(description=(
        "Constructive feedback on the original paragraph"
    ))
structured_llm = llm.with_structured_output(Paragraph)
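A minimal sanity check of the structured output; the sample text is made up and any short paragraph works:
result = structured_llm.invoke("Please review: LangChain make it easy too build LLM apps.")
result.feedback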
Chaining the Prompt and the LLM
chain = (
    {"article": lambda x: x["article"]}
    | second_prompt
    | structured_llm
    | {
        "original_paragraph": lambda x: x.original_paragraph,
        "edited_paragraph": lambda x: x.edited_paragraph,
        "feedback": lambda x: x.feedback
    }
)
Invoking the LLM
out = chain.invoke({"article": article})
out
Chat Memory
Four types of chat memory in LangChain, each recreated below with RunnableWithMessageHistory: ConversationBufferMemory, ConversationBufferWindowMemory, ConversationSummaryMemory, and ConversationSummaryBufferMemory.
ConversationBufferMemory with RunnableWithMessageHistory
from langchain.prompts import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
ChatPromptTemplate
)
system_prompt = "You are a helpful assistant called Zeta."
prompt_template = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(system_prompt),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{query}"),
])
pipeline = prompt_template | llm
from langchain_core.chat_history import InMemoryChatMessageHistory
chat_map = {}
def get_chat_history(session_id: str = "default") -> InMemoryChatMessageHistory:
if session_id not in chat_map:
# if session ID doesn't exist, create a new chat history
chat_map[session_id] = InMemoryChatMessageHistory()
return chat_map[session_id]
from langchain_core.runnables.history import RunnableWithMessageHistory
pipeline_with_history = RunnableWithMessageHistory(
pipeline,
get_session_history=get_chat_history,
input_messages_key="query",
history_messages_key="history"
)
pipeline_with_history.invoke(
{"query": "Hi, my name is James"},
config={"session_id": "id_123"}
)
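A quick follow-up call with the same session ID confirms the stored history is being injected; the query is illustrative:
pipeline_with_history.invoke(
    {"query": "What is my name?"},
    config={"session_id": "id_123"}
)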
ConversationBufferWindowMemory with RunnableWithMessageHistory
from pydantic import BaseModel, Field
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage
class BufferWindowMessageHistory(BaseChatMessageHistory, BaseModel):
messages: list[BaseMessage] = Field(default_factory=list)
k: int = Field(default_factory=int)
def __init__(self, k: int):
super().__init__(k=k)
print(f"Initializing BufferWindowMessageHistory with k={k}")
def add_messages(self, messages: list[BaseMessage]) -> None:
"""Add messages to the history, removing any messages beyond
the last `k` messages.
"""
self.messages.extend(messages)
self.messages = self.messages[-self.k:]
def clear(self) -> None:
"""Clear the history."""
self.messages = []
chat_map = {}
def get_chat_history(session_id: str, k: int = 4) -> BufferWindowMessageHistory:
print(f"get_chat_history called with session_id={session_id} and k={k}")
if session_id not in chat_map:
# if session ID doesn't exist, create a new chat history
chat_map[session_id] = BufferWindowMessageHistory(k=k)
    # return the chat history
return chat_map[session_id]
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables import ConfigurableFieldSpec
pipeline_with_history = RunnableWithMessageHistory(
pipeline,
get_session_history=get_chat_history,
input_messages_key="query",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="The session ID to use for the chat history",
default="id_default",
),
ConfigurableFieldSpec(
id="k",
annotation=int,
name="k",
description="The number of messages to keep in the history",
default=4,
),
],
)
pipeline_with_history.invoke(
{"query": "Hi, my name is James"},
config={"configurable": {"session_id": "id_k4", "k": 4}},
)
We can also modify the messages stored in memory by editing the records inside the chat_map dictionary directly.
chat_map["id_k4"].clear() # clear the history
# manually insert history
chat_map["id_k4"].add_user_message("Hi, my name is James")
chat_map["id_k4"].add_ai_message("I'm an AI model called Zeta.")
chat_map["id_k4"].add_user_message(
"I'm researching the different types of conversational memory."
)
chat_map["id_k4"].add_ai_message("That's interesting, what are some examples?")We can retrieve the messages using the following command line.
chat_map["id_k4"].messagesConversationSummaryMemory with RunnableWithMessageHistory
ConversationSummaryMemory with RunnableWithMessageHistory
from langchain_core.messages import SystemMessage
class ConversationSummaryMessageHistory(BaseChatMessageHistory, BaseModel):
messages: list[BaseMessage] = Field(default_factory=list)
llm: AzureChatOpenAI = Field(default_factory=AzureChatOpenAI)
def __init__(self, llm: AzureChatOpenAI):
super().__init__(llm=llm)
    def add_messages(self, messages: list[BaseMessage]) -> None:
        """Add messages to the history, then replace the entire
        history with a single updated summary message.
        """
        # capture the existing summary (if any) before adding the new messages
        existing_summary = "\n".join(x.content for x in self.messages)
        self.messages.extend(messages)
        # construct the summary chat messages
        summary_prompt = ChatPromptTemplate.from_messages(
            [
                SystemMessagePromptTemplate.from_template(
                    "Given the existing conversation summary and the new messages, "
                    "generate a new summary of the conversation, maintaining "
                    "as much relevant information as possible."
                ),
                HumanMessagePromptTemplate.from_template(
                    "Existing conversation summary:\n{existing_summary}\n\n"
                    "New messages:\n{messages}"
                ),
            ]
        )
        # format the messages and invoke the LLM
        new_summary = self.llm.invoke(
            summary_prompt.format_messages(
                existing_summary=existing_summary,
                messages="\n".join(x.content for x in messages),
            )
        )
        # replace the existing history with a single system summary message
        self.messages = [SystemMessage(content=new_summary.content)]
def clear(self) -> None:
"""Clear the history."""
self.messages = []
chat_map = {}
def get_chat_history(
session_id: str, llm: AzureChatOpenAI
) -> ConversationSummaryMessageHistory:
if session_id not in chat_map:
# if session ID doesn't exist, create a new chat history
chat_map[session_id] = ConversationSummaryMessageHistory(llm=llm)
# return the chat history
return chat_map[session_id]
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables import ConfigurableFieldSpec
pipeline_with_history = RunnableWithMessageHistory(
pipeline,
get_session_history=get_chat_history,
input_messages_key="query",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="The session ID to use for the chat history",
default="id_default",
),
ConfigurableFieldSpec(
id="llm",
annotation=AzureChatOpenAI,
name="LLM",
description="The LLM to use for the conversation summary",
default=llm,
),
],
)
pipeline_with_history.invoke(
    {"query": "Hi, my name is James"}, config={"session_id": "id_123", "llm": llm}
)
We can also invoke the pipeline over several messages in a loop.
for msg in [
    "I have been looking at ConversationBufferMemory and ConversationBufferWindowMemory.",
    "Buffer memory just stores the entire conversation",
    "Buffer window memory stores the last k messages, dropping the rest.",
]:
    pipeline_with_history.invoke(
        {"query": msg}, config={"session_id": "id_123", "llm": llm}
    )
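After those calls the whole conversation has been compressed into a single system message, which can be inspected the same way as before:
chat_map["id_123"].messages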
ConversationSummaryBufferMemory with RunnableWithMessageHistory
class ConversationSummaryBufferMessageHistory(BaseChatMessageHistory, BaseModel):
messages: list[BaseMessage] = Field(default_factory=list)
llm: AzureChatOpenAI = Field(default_factory=AzureChatOpenAI)
k: int = Field(default_factory=int)
def __init__(self, llm: AzureChatOpenAI, k: int):
super().__init__(llm=llm, k=k)
def add_messages(self, messages: list[BaseMessage]) -> None:
"""Add messages to the history, removing any messages beyond
the last `k` messages and summarizing the messages that we
drop.
"""
existing_summary: SystemMessage | None = None
old_messages: list[BaseMessage] | None = None
# see if we already have a summary message
if len(self.messages) > 0 and isinstance(self.messages[0], SystemMessage):
print(">> Found existing summary")
existing_summary = self.messages.pop(0)
# add the new messages to the history
self.messages.extend(messages)
# check if we have too many messages
if len(self.messages) > self.k:
print(
f">> Found {len(self.messages)} messages, dropping "
f"oldest {len(self.messages) - self.k} messages."
)
# pull out the oldest messages...
old_messages = self.messages[: -self.k]
# ...and keep only the most recent messages
self.messages = self.messages[-self.k :]
if old_messages is None:
print(">> No old messages to update summary with")
# if we have no old_messages, we have nothing to update in summary
return
# construct the summary chat messages
summary_prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(
"Given the existing conversation summary and the new messages, "
"generate a new summary of the conversation. Ensuring to maintain "
"as much relevant information as possible."
),
HumanMessagePromptTemplate.from_template(
"Existing conversation summary:\n{existing_summary}\n\n"
"New messages:\n{old_messages}"
),
]
)
# format the messages and invoke the LLM
        new_summary = self.llm.invoke(
            summary_prompt.format_messages(
                existing_summary=existing_summary.content if existing_summary else "",
                old_messages="\n".join(x.content for x in old_messages),
            )
        )
print(f">> New summary: {new_summary.content}")
# prepend the new summary to the history
self.messages = [SystemMessage(content=new_summary.content)] + self.messages
def clear(self) -> None:
"""Clear the history."""
self.messages = []
chat_map = {}
def get_chat_history(
session_id: str, llm: AzureChatOpenAI, k: int
) -> ConversationSummaryBufferMessageHistory:
if session_id not in chat_map:
# if session ID doesn't exist, create a new chat history
chat_map[session_id] = ConversationSummaryBufferMessageHistory(llm=llm, k=k)
# return the chat history
return chat_map[session_id]
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables import ConfigurableFieldSpec
pipeline_with_history = RunnableWithMessageHistory(
pipeline,
get_session_history=get_chat_history,
input_messages_key="query",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="The session ID to use for the chat history",
default="id_default",
),
ConfigurableFieldSpec(
id="llm",
annotation=AzureChatOpenAI,
name="LLM",
description="The LLM to use for the conversation summary",
default=llm,
),
ConfigurableFieldSpec(
id="k",
annotation=int,
name="k",
description="The number of messages to keep in the history",
default=4,
),
],
)
pipeline_with_history.invoke(
{"query": "Hi, my name is James"},
config={"session_id": "id_123", "llm": llm, "k": 4},
)
chat_map["id_123"].messages