Overview
Write tests for code implemented with LangGraph, like the example below.
To do that, you need to know how LangChain's ChatOpenAI is called inside each chain, so that a MagicMock can stand in for it.
Basics
chat.with_structured_output
source
structured_llm_grader = chat.with_structured_output(GradeDocuments)
retrieval_grader = grade_prompt | structured_llm_grader
score = retrieval_grader.invoke({"question": question, "document": d.page_content})
test
chat_mock = MagicMock()
chat_mock.with_structured_output.return_value.side_effect = [
    GradeDocuments(binary_score="yes"),  # retrieval_grader for the first document
    GradeDocuments(binary_score="no"),  # retrieval_grader for the second document
]
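Why this works: chat_mock.with_structured_output(...) returns the same MagicMock (its return_value) regardless of which schema it receives, so a single side_effect list feeds every structured grader in invocation order. And when a MagicMock is piped into a chain, langchain-core coerces the callable into a RunnableLambda, so each invoke of the chain calls the mock once and pops the next side_effect entry. A minimal, self-contained sketch of that coercion (the prompt text and return values are placeholders):

from unittest.mock import MagicMock

from langchain_core.prompts import ChatPromptTemplate

mock_model = MagicMock(side_effect=["first call", "second call"])
chain = ChatPromptTemplate.from_template("{q}") | mock_model  # mock is coerced to a RunnableLambda

assert chain.invoke({"q": "a"}) == "first call"
assert chain.invoke({"q": "b"}) == "second call"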
chat.invoke
source
generate_prompt = ChatPromptTemplate.from_template(generate_prompt_template)
rag_chain = generate_prompt | chat | StrOutputParser()
generation = rag_chain.invoke({"context": documents, "question": question})
test
chat_mock.side_effect = [
    AIMessage(
        content="""I found the following pages:
1. <http://example.com/page1|Page 1>
2. <http://example.com/page2|Page 2>"""
    ),  # rag_chain generate
]
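In this chain the mock stands in for the chat model itself, and StrOutputParser reads .content from whatever the model step returns, so the side_effect entries should be AIMessage objects, matching what a real chat model would produce. A minimal sketch (placeholder prompt and content):

from unittest.mock import MagicMock

from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

chat_mock = MagicMock(side_effect=[AIMessage(content="hello")])
chain = ChatPromptTemplate.from_template("{q}") | chat_mock | StrOutputParser()

assert chain.invoke({"q": "hi"}) == "hello"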
Example LangGraph code
from typing import List
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import END, START, StateGraph
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
retrieve_grader_system = """You are a grader assessing relevance of a retrieved document to a user question. \n
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question."""
generate_prompt_template = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:""" # noqa E501
hallucination_system = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n
Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts.""" # noqa E501
answer_grader_system = """You are a grader assessing whether an answer addresses / resolves a question \n
Give a binary score 'yes' or 'no'. 'Yes' means that the answer resolves the question."""
question_rewriter_system = """You are a question re-writer that converts an input question to a better version that is optimized \n
for vectorstore retrieval. Look at the input and try to reason about the underlying semantic intent / meaning."""  # noqa E501
class GradeDocuments(BaseModel):
    """Binary score for relevance check on retrieved documents."""

    binary_score: str = Field(description="Documents are relevant to the question, 'yes' or 'no'")


class GradeHallucinations(BaseModel):
    """Binary score for hallucination present in generation answer."""

    binary_score: str = Field(description="Answer is grounded in the facts, 'yes' or 'no'")


class GradeAnswer(BaseModel):
    """Binary score to assess answer addresses question."""

    binary_score: str = Field(description="Answer addresses the question, 'yes' or 'no'")
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        question: question
        generation: LLM generation
        documents: list of documents
    """

    question: str
    generation: str
    documents: List[str]
def create_graph(chat, retriever):
    # Retrieval Grader
    structured_llm_grader = chat.with_structured_output(GradeDocuments)
    grade_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", retrieve_grader_system),
            ("human", "Retrieved document: \n\n {document} \n\n User question: {question}"),
        ]
    )
    retrieval_grader = grade_prompt | structured_llm_grader

    # Generate
    generate_prompt = ChatPromptTemplate.from_template(generate_prompt_template)
    rag_chain = generate_prompt | chat | StrOutputParser()

    # Hallucination Grader
    structured_llm_grader = chat.with_structured_output(GradeHallucinations)
    hallucination_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", hallucination_system),
            ("human", "Set of facts: \n\n {documents} \n\n LLM generation: {generation}"),
        ]
    )
    hallucination_grader = hallucination_prompt | structured_llm_grader

    # Answer Grader
    structured_llm_grader = chat.with_structured_output(GradeAnswer)
    answer_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", answer_grader_system),
            ("human", "User question: \n\n {question} \n\n LLM generation: {generation}"),
        ]
    )
    answer_grader = answer_prompt | structured_llm_grader

    # Question Re-writer
    re_write_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", question_rewriter_system),
            (
                "human",
                "Here is the initial question: \n\n {question} \n Formulate an improved question.",
            ),
        ]
    )
    question_rewriter = re_write_prompt | chat | StrOutputParser()
    def retrieve(state):
        """
        Retrieve documents

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, documents, that contains retrieved documents
        """
        print("---RETRIEVE---")
        question = state["question"]
        # Retrieval
        documents = retriever.invoke(question)
        print(f"{len(documents)} documents retrieved")
        return {"documents": documents, "question": question}

    def generate(state):
        """
        Generate answer

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, generation, that contains LLM generation
        """
        print("---GENERATE---")
        question = state["question"]
        documents = state["documents"]
        # RAG generation
        generation = rag_chain.invoke({"context": documents, "question": question})
        return {"documents": documents, "question": question, "generation": generation}

    def grade_documents(state):
        """
        Determines whether the retrieved documents are relevant to the question.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): Updates documents key with only filtered relevant documents
        """
        print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
        question = state["question"]
        documents = state["documents"]
        # Score each doc
        filtered_docs = []
        for d in documents:
            score = retrieval_grader.invoke({"question": question, "document": d.page_content})
            grade = score.binary_score
            if grade == "yes":
                print("---GRADE: DOCUMENT RELEVANT---")
                filtered_docs.append(d)
            else:
                print("---GRADE: DOCUMENT NOT RELEVANT---")
        print(f"filtered documents ({len(documents)} -> {len(filtered_docs)})")
        return {"documents": filtered_docs, "question": question}

    def transform_query(state):
        """
        Transform the query to produce a better question.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): Updates question key with a re-phrased question
        """
        print("---TRANSFORM QUERY---")
        question = state["question"]
        documents = state["documents"]
        # Re-write question
        better_question = question_rewriter.invoke({"question": question})
        return {"documents": documents, "question": better_question}
    ### Edges ###

    def decide_to_generate(state):
        """
        Determines whether to generate an answer, or re-generate a question.

        Args:
            state (dict): The current graph state

        Returns:
            str: Binary decision for next node to call
        """
        print("---ASSESS GRADED DOCUMENTS---")
        filtered_documents = state["documents"]
        if not filtered_documents:
            # All documents were filtered out by grade_documents,
            # so re-write the query and retrieve again
            print("---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---")
            return "transform_query"
        else:
            # We have relevant documents, so generate an answer
            print("---DECISION: GENERATE---")
            return "generate"

    def grade_generation_v_documents_and_question(state):
        """
        Determines whether the generation is grounded in the document and answers question.

        Args:
            state (dict): The current graph state

        Returns:
            str: Decision for next node to call
        """
        print("---CHECK HALLUCINATIONS---")
        question = state["question"]
        documents = state["documents"]
        generation = state["generation"]
        score = hallucination_grader.invoke({"documents": documents, "generation": generation})
        grade = score.binary_score

        # Check hallucination
        if grade == "yes":
            print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---")
            # Check question-answering
            print("---GRADE GENERATION vs QUESTION---")
            score = answer_grader.invoke({"question": question, "generation": generation})
            grade = score.binary_score
            if grade == "yes":
                print("---DECISION: GENERATION ADDRESSES QUESTION---")
                return "useful"
            else:
                print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
                return "not useful"
        else:
            print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
            return "not supported"
    workflow = StateGraph(GraphState)

    # Define the nodes
    workflow.add_node("retrieve", retrieve)  # retrieve
    workflow.add_node("grade_documents", grade_documents)  # grade documents
    workflow.add_node("generate", generate)  # generate
    workflow.add_node("transform_query", transform_query)  # transform_query

    # Build graph
    workflow.add_edge(START, "retrieve")
    workflow.add_edge("retrieve", "grade_documents")
    workflow.add_conditional_edges(
        "grade_documents",
        decide_to_generate,
        {
            "transform_query": "transform_query",
            "generate": "generate",
        },
    )
    workflow.add_edge("transform_query", "retrieve")
    workflow.add_conditional_edges(
        "generate",
        grade_generation_v_documents_and_question,
        {
            "not supported": "generate",
            "useful": END,
            "not useful": "transform_query",
        },
    )

    # Compile
    app = workflow.compile()
    return app
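For contrast with the mocked test below, create_graph can also be wired with real components. This is a hypothetical setup, not part of the example under test: the model name, embedding class, and sample text are placeholders to adjust for your environment.

from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

chat = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # placeholder model name
vectorstore = InMemoryVectorStore.from_texts(
    ["Sofia derives from the Greek word for wisdom."],  # placeholder corpus
    embedding=OpenAIEmbeddings(),
)
app = create_graph(chat=chat, retriever=vectorstore.as_retriever())
print(app.invoke({"question": "What's the origin of the name 'Sofia'?"})["generation"])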
Example test
from unittest.mock import MagicMock
from langchain.schema import Document
from langchain_core.messages import AIMessage
from src.examples.langgraph_self_reflection import (  # import from the code above
    GradeAnswer,
    GradeDocuments,
    GradeHallucinations,
    create_graph,
)
def test_graph():
    retriever = MagicMock()
    retriever.invoke.side_effect = [
        [
            Document(metadata={"title": "Page 1", "source": "http://example.com/page1"}, page_content=""),
            Document(metadata={"title": "Page 2", "source": "http://example.com/page2"}, page_content=""),
        ],  # retrieve
    ]
    chat_mock = MagicMock()
    chat_mock.side_effect = [
        AIMessage(
            content="""I found the following pages:
1. <http://example.com/page1|Page 1>"""
        ),  # rag_chain generate
    ]
    # Every grader shares chat_mock.with_structured_output.return_value,
    # so one side_effect list serves all of them in call order
    chat_mock.with_structured_output.return_value.side_effect = [
        GradeDocuments(binary_score="yes"),  # retrieval_grader for the first document
        GradeDocuments(binary_score="no"),  # retrieval_grader for the second document
        GradeHallucinations(binary_score="yes"),  # hallucination_grader
        GradeAnswer(binary_score="yes"),  # answer_grader
    ]
    graph = create_graph(chat=chat_mock, retriever=retriever)
    res = graph.invoke({"question": "What's the origin of the name 'Sofia'?"})

    # Verify the expected behavior
    expected_output = """I found the following pages:
1. <http://example.com/page1|Page 1>"""
    assert res == {
        "question": "What's the origin of the name 'Sofia'?",
        "generation": expected_output,
        "documents": [
            Document(metadata={"title": "Page 1", "source": "http://example.com/page1"}, page_content=""),
        ],  # the second document is filtered out by the retrieval_grader
    }
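As an optional follow-up, the mocks themselves can be asserted on. These counts are a sketch that follows from the flow above (one retrieval, one generation, and four structured-grader calls: two relevance grades, one hallucination grade, one answer grade); they would be appended to the end of test_graph:

    assert retriever.invoke.call_count == 1
    assert chat_mock.call_count == 1  # rag_chain generation only
    assert chat_mock.with_structured_output.return_value.call_count == 4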
Example test execution
$ pytest -s tests/examples/test_langgraph_self_reflection.py::test_graph
tests/examples/test_langgraph_self_reflection.py::test_graph ---RETRIEVE---
2 documents retrieved
---CHECK DOCUMENT RELEVANCE TO QUESTION---
---GRADE: DOCUMENT RELEVANT---
---GRADE: DOCUMENT NOT RELEVANT---
filtered documents (2 -> 1)
---ASSESS GRADED DOCUMENTS---
---DECISION: GENERATE---
---GENERATE---
---CHECK HALLUCINATIONS---
---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---
---GRADE GENERATION vs QUESTION---
---DECISION: GENERATION ADDRESSES QUESTION---
PASSED