Complete guide to integrating Protectron with LangChain for EU AI Act compliance logging.
Protectron's LangChain integration uses LangChain's native callback system to automatically capture events from your chains, agents, tools, and retrievers.
No changes to your existing code required — just add our callback.
```bash
# Install with LangChain support
pip install "protectron[langchain]"

# Or install separately
pip install protectron langchain langchain-openai
```

Requirements: a Protectron API key, available as the `PROTECTRON_API_KEY` environment variable.
```python
from protectron.langchain import ProtectronCallback

# Basic initialization (uses PROTECTRON_API_KEY env var)
callback = ProtectronCallback(
    system_id="my-langchain-app"
)

# Full configuration
callback = ProtectronCallback(
    system_id="my-langchain-app",
    environment="production",  # production, staging, development

    # Content logging
    log_llm_content=True,    # Log prompts and completions
    log_tool_inputs=True,    # Log tool parameters
    log_tool_outputs=True,   # Log tool results

    # Privacy
    pii_redaction=True,      # Auto-redact PII
    hash_user_ids=True,      # Hash user identifiers

    # Performance
    sample_rate=1.0,         # 1.0 = log everything
    async_mode=True,         # Non-blocking (recommended)
)
```

```python
from langchain_openai import ChatOpenAI

# Option 1: Add to LLM
llm = ChatOpenAI(
    model="gpt-5.2",
    callbacks=[callback]
)

# Option 2: Add at invocation time
result = chain.invoke(
    {"input": "Hello"},
    config={"callbacks": [callback]}
)

# Option 3: Add to executor/runner
executor = AgentExecutor(
    agent=agent,
    tools=tools,
    callbacks=[callback]
)
```

Basic chain with prompt, LLM, and output parser.
```python
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from protectron.langchain import ProtectronCallback

callback = ProtectronCallback(system_id="simple-chain")

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])

llm = ChatOpenAI(model="gpt-5.2")
chain = prompt | llm | StrOutputParser()

result = chain.invoke(
    {"input": "Explain EU AI Act in one sentence"},
    config={"callbacks": [callback]}
)
```

Events logged:
Agent with tools that reasons and acts.
```python
from langchain.agents import create_react_agent, AgentExecutor
from langchain_openai import ChatOpenAI
from langchain.tools import tool
from langchain import hub
from protectron.langchain import ProtectronCallback

callback = ProtectronCallback(
    system_id="react-agent",
    environment="production"
)

@tool
def search_database(query: str) -> str:
    """Search the internal database for information."""
    return f"Found results for: {query}"

tools = [search_database]
llm = ChatOpenAI(model="gpt-5.2", temperature=0)
prompt = hub.pull("hwchase17/react")

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(
    agent=agent,
    tools=tools,
    callbacks=[callback],
    verbose=True
)

result = executor.invoke({"input": "Search for AI compliance"})
```

Events logged:
Combine retrieval with generation for grounded responses.
```python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from protectron.langchain import ProtectronCallback

callback = ProtectronCallback(
    system_id="rag-system",
    log_tool_outputs=True,  # Log retrieved documents
)

embeddings = OpenAIEmbeddings()
vectorstore = Chroma(embedding_function=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

template = """Answer based on this context:
{context}

Question: {question}"""
prompt = ChatPromptTemplate.from_template(template)
llm = ChatOpenAI(model="gpt-5.2")

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt | llm | StrOutputParser()
)

answer = rag_chain.invoke(
    "What are high-risk AI requirements?",
    config={"callbacks": [callback]}
)
```

Events logged:
```python
callback = ProtectronCallback(
    system_id="selective-logging",

    # Don't log prompts/completions (metadata only)
    log_llm_content=False,

    # Skip specific tools
    exclude_tools=["internal_search", "debug_tool"],

    # Only log certain event types
    include_event_types=[
        "agent_action",
        "tool_start",
        "tool_end",
        "agent_finish"
    ]
)
```
system_id="pii-safe",
pii_redaction=True,
pii_types=[
"email",
"phone",
"ssn",
"credit_card",
"address"
]
)
# Before logging: "Contact john@example.com at 555-123-4567"
# After redaction: "Contact [EMAIL] at [PHONE]"callback = ProtectronCallback(
system_id="metadata-rich",
include_metadata={
"team": "customer-success",
"version": "2.1.0",
"region": "eu-west-1"
}
)
# Or add per-invocation metadata
result = executor.invoke(
{"input": "Hello"},
config={
"callbacks": [callback],
"metadata": {
"user_id": "user_123",
"session_id": "sess_abc"
}
}
)
```

**Does Protectron support both LangChain 0.1.x and 0.2.x?**

Yes, Protectron supports both LangChain 0.1.x and 0.2.x. The callback API is compatible with both versions.
**Does Protectron work with LangGraph?**

Yes, LangGraph is fully supported. Add the callback to your graph compilation or individual node invocations.
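For example, a compiled LangGraph graph is a standard LangChain runnable, so it accepts callbacks through the same `config` argument. A minimal sketch, assuming a compiled graph named `app` and the `callback` from the quickstart:

```python
# A compiled LangGraph graph accepts the same config dict as any runnable,
# so the Protectron callback attaches without changes to the graph itself
result = app.invoke(
    {"messages": [("user", "Search for AI compliance")]},
    config={"callbacks": [callback]}
)
```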
**What is the performance overhead?**

With `async_mode=True` (the default), logging is non-blocking and adds less than 5 ms of latency. For high-volume applications, use sampling.
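Sampling uses the `sample_rate` option from the configuration reference above; a minimal sketch, assuming the rate is the fraction of invocations logged:

```python
# Log roughly 10% of invocations to cut volume on hot paths
callback = ProtectronCallback(
    system_id="high-volume-app",
    sample_rate=0.1,  # 1.0 = log everything
    async_mode=True,  # keep logging non-blocking
)
```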
**Can I use Protectron alongside LangSmith?**

Absolutely. Both callbacks can run simultaneously. Use LangSmith for debugging and development, and Protectron for compliance.
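A sketch of running both, using LangChain's `LangChainTracer` for LangSmith (the project name and `chain` are placeholders):

```python
from langchain_core.tracers import LangChainTracer
from protectron.langchain import ProtectronCallback

tracer = LangChainTracer(project_name="my-project")  # LangSmith tracing
callback = ProtectronCallback(system_id="my-langchain-app")

# Both callbacks receive every event; neither interferes with the other
result = chain.invoke(
    {"input": "Hello"},
    config={"callbacks": [callback, tracer]}
)
```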
**How do I log custom events?**

Use the Protectron client directly: `protectron.log_event('custom_event', {'key': 'value'})`. This works alongside the LangChain callback.
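In context, that looks like the sketch below (the top-level `import protectron` is an assumption; the `log_event` call is as shown above):

```python
import protectron  # assumed client import

# Emit a custom event in addition to what the callback captures automatically
protectron.log_event("custom_event", {"key": "value"})
```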
**What happens if the Protectron API is unreachable?**

Events are buffered locally and retried. If the buffer fills, the oldest events are dropped. Enable `persist_on_failure` to write buffered events to disk.
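A sketch enabling that option, assuming it is a constructor flag like the others above:

```python
# Spill undeliverable events to disk instead of dropping them
callback = ProtectronCallback(
    system_id="my-langchain-app",
    persist_on_failure=True,
)
```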
Add EU AI Act compliance to your LangChain applications in minutes.