# AI Agent Integration
Learn how to integrate MarkdownAPI.io with AI agents for content generation and memory persistence.
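Every snippet on this page shares a bit of setup. A minimal sketch, assuming the async file helpers from the Files API reference are in scope (the project IDs are placeholders, not real values):

```python
from datetime import datetime

# The examples below assume async helpers - upload_file, download_file,
# list_files - wrapping the Files API (see the Files API reference), plus
# two placeholder project IDs:
AGENT_MEMORY_PROJECT = "proj_agent_memory"   # example value
CONTENT_PROJECT = "proj_generated_content"   # example value
```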
## Use Cases
### 1. Agent Memory Storage
Store conversation history and context:
```python
async def save_agent_memory(agent_id: str, conversation: str, metadata: dict):
    """Save agent conversation to memory."""
    filename = f"conversations/{agent_id}.md"
    content = f"""# Agent Memory: {agent_id}

## Last Updated
{metadata.get('timestamp', 'Unknown')}

## Conversation
{conversation}
"""
    await upload_file(
        project_id=AGENT_MEMORY_PROJECT,
        file_path=filename,
        content=content.encode(),
        metadata=metadata,
    )
```
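Called after each exchange, it might look like this (the agent ID and transcript are placeholders):

```python
# Hypothetical call - the agent ID and conversation text are made up.
await save_agent_memory(
    agent_id="support-bot-1",
    conversation="User: How do I reset my key?\nAgent: Go to Settings > API.",
    metadata={"timestamp": datetime.utcnow().isoformat()},
)
```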
### 2. Content Generation Pipeline

Generate and store AI-created content:
```python
async def generate_and_store_article(topic: str, agent_name: str):
    """Generate an article with AI and store it."""
    import openai  # uses the openai<1.0 ChatCompletion interface

    # Generate content
    response = await openai.ChatCompletion.acreate(
        model="gpt-4",
        messages=[{"role": "user", "content": f"Write article about: {topic}"}]
    )
    content = response.choices[0].message.content

    # Store in MarkdownAPI
    metadata = {
        "topic": topic,
        "generated_by": agent_name,
        "model": "gpt-4",
        "timestamp": datetime.utcnow().isoformat(),
    }
    return await upload_file(
        project_id=CONTENT_PROJECT,
        file_path=f"articles/{topic.replace(' ', '-').lower()}.md",
        content=content.encode(),
        metadata=metadata,
    )
```
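Usage is a single call; the topic doubles as the file slug:

```python
# Stores the result as articles/vector-databases.md (arguments are examples).
await generate_and_store_article("vector databases", agent_name="writer-bot")
```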
### 3. Knowledge Base Management

Build and query an agent knowledge base:
```python
class AgentKnowledgeBase:
    def __init__(self, project_id: str):
        self.project_id = project_id

    async def add_knowledge(self, topic: str, content: str, tags: list[str]):
        """Add a knowledge article."""
        filename = f"knowledge/{topic}.md"
        metadata = {
            "tags": tags,
            "created": datetime.utcnow().isoformat(),
            "type": "knowledge",
        }
        return await upload_file(
            project_id=self.project_id,
            file_path=filename,
            content=content.encode(),
            metadata=metadata,
        )

    async def search_knowledge(self, tag: str):
        """Find knowledge articles by tag."""
        files = await list_files(self.project_id)
        return [f for f in files
                if tag in f.get('custom_metadata', {}).get('tags', [])]

    async def get_knowledge(self, topic: str):
        """Retrieve a knowledge article."""
        return await download_file(self.project_id, f"knowledge/{topic}.md")
```
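Putting the class to work (the topic, tags, and project ID are illustrative):

```python
kb = AgentKnowledgeBase(project_id=CONTENT_PROJECT)  # placeholder project

await kb.add_knowledge(
    topic="rate-limits",
    content="# Rate Limits\n\nDefault: 100 requests/minute.",
    tags=["api", "limits"],
)

api_articles = await kb.search_knowledge("api")  # filter by tag
raw = await kb.get_knowledge("rate-limits")      # bytes of the .md file
```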
## LangChain Integration

Persist LangChain conversation memory to MarkdownAPI with a custom memory class:

```python
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory


class MarkdownAPIMemory(ConversationBufferMemory):
    """Custom LangChain memory backed by MarkdownAPI."""

    # Declared as fields so LangChain's pydantic base class accepts them.
    project_id: str
    session_id: str
    filename: str = ""

    def __init__(self, project_id: str, session_id: str, **kwargs):
        super().__init__(project_id=project_id, session_id=session_id,
                         filename=f"sessions/{session_id}.md", **kwargs)

    async def persist(self):
        """Save the conversation buffer to MarkdownAPI.

        Kept as an explicit async step rather than an override of
        save_context, which LangChain invokes synchronously.
        """
        await upload_file(
            project_id=self.project_id,
            file_path=self.filename,
            content=self.buffer.encode(),
            metadata={
                "session_id": self.session_id,
                "updated": datetime.utcnow().isoformat(),
            },
        )

    async def restore(self):
        """Load a previous conversation from MarkdownAPI."""
        try:
            content = await download_file(self.project_id, self.filename)
        except Exception:
            return  # No previous conversation
        # Rebuild chat history from the saved "Human:" / "AI:" transcript
        # (single-line messages assumed).
        for line in content.decode().splitlines():
            if line.startswith("Human: "):
                self.chat_memory.add_user_message(line[len("Human: "):])
            elif line.startswith("AI: "):
                self.chat_memory.add_ai_message(line[len("AI: "):])


# Usage
memory = MarkdownAPIMemory(project_id="...", session_id="user123")
await memory.restore()

chain = ConversationChain(
    llm=ChatOpenAI(),
    memory=memory
)
response = await chain.arun("Hello!")
await memory.persist()
```

## Best Practices
- **Organize by agent:** Create separate projects for different agents
- **Use metadata:** Store agent state and context in metadata
- **Version memory:** Track conversation versions
- **Implement cleanup:** Regularly archive old conversations
- **Handle concurrency:** Use file versioning for multi-agent scenarios (see the sketch after this list)
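For the last two points, a minimal sketch of metadata-based versioning. The `version` counter, the `file_path` key on listed files, and the helper itself are assumptions, not part of the MarkdownAPI contract:

```python
# Hypothetical helper: tracks a monotonically increasing "version" counter
# in file metadata so writers can spot and archive stale conversation files.
async def save_with_version(project_id: str, path: str, content: bytes):
    files = await list_files(project_id)
    # Assumes listed files expose "file_path" and "custom_metadata" keys.
    current = next((f for f in files if f.get("file_path") == path), None)
    version = 0
    if current:
        version = int(current.get("custom_metadata", {}).get("version", 0))

    await upload_file(
        project_id=project_id,
        file_path=path,
        content=content,
        metadata={
            "version": str(version + 1),
            "updated": datetime.utcnow().isoformat(),
        },
    )
```

A true compare-and-swap needs server-side support; this only makes conflicting writes visible after the fact.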
## See Also
- Quick Start - Basic API usage
- Files API - File management reference