OpenAI
Memori supports all OpenAI Chat Completions and Responses APIs. Both sync and async clients are fully supported.
Quick Start
OpenAI Integration
# Minimal quick start: register an OpenAI client with Memori so every
# completion is captured automatically, attribute the memory stream to a
# user and agent, then call the API exactly as usual.
from memori import Memori
from openai import OpenAI

openai_client = OpenAI()

memori = Memori().llm.register(openai_client)
memori.attribution(entity_id="user_123", process_id="my_agent")

chat_messages = [{"role": "user", "content": "Hello!"}]
completion = openai_client.chat.completions.create(
    model="gpt-4o-mini",
    messages=chat_messages,
)
print(completion.choices[0].message.content)
Supported Modes
| Mode | Python | TypeScript |
|---|---|---|
| Sync | client.chat.completions.create() | — |
| Async | await client.chat.completions.create() | await client.chat.completions.create() |
| Streamed | stream=True parameter | stream: true parameter |
| Responses API | client.responses.create() | — |
Additional Modes
Async (Python)
import asyncio

from memori import Memori
from openai import AsyncOpenAI

# Register the async client exactly like the sync one; Memori hooks the
# same chat.completions surface on AsyncOpenAI.
client = AsyncOpenAI()
mem = Memori().llm.register(client)
mem.attribution(entity_id="user_123", process_id="my_agent")


async def main():
    """Run a single async chat completion and print the reply."""
    # Indentation restored: as published, the function body was flush
    # left, which is a SyntaxError in Python.
    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)


asyncio.run(main())
Streaming
Streaming
from memori import Memori
from openai import OpenAI

client = OpenAI()
mem = Memori().llm.register(client)
mem.attribution(entity_id="user_123", process_id="my_agent")

# stream=True yields incremental chunks; Memori records the full exchange
# once the stream is consumed.
stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True
)
# Indentation restored: the loop/if bodies were flush left in the
# published snippet, which is a SyntaxError in Python.
for chunk in stream:
    # Some chunks carry no choices (e.g. a trailing usage chunk) and the
    # final delta may have content=None — guard both before printing.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
Responses API (Python)
# Responses API example: Memori instrumentation covers
# client.responses.create() just as it does chat completions.
from memori import Memori
from openai import OpenAI

openai_client = OpenAI()

memori = Memori().llm.register(openai_client)
memori.attribution(entity_id="user_123", process_id="my_agent")

system_instructions = "You are a helpful assistant."
result = openai_client.responses.create(
    model="gpt-4o-mini",
    input="Hello!",
    instructions=system_instructions,
)
print(result.output_text)
Multi-Turn Conversations
Memori automatically captures each interaction and links it to the others within the same session.
Multi-Turn Conversations
# Multi-turn example: every call is captured by Memori and linked within
# the same session, while the growing message list gives the model its
# conversational context.
from memori import Memori
from openai import OpenAI

client = OpenAI()
mem = Memori().llm.register(client)
mem.attribution(entity_id="user_123", process_id="my_agent")


def _complete(history):
    # Send the conversation so far and return the assistant's reply text.
    result = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=history,
    )
    return result.choices[0].message.content


messages = [{"role": "user", "content": "My name is Alice."}]

# Turn 1: introduce a fact, then record the assistant's reply in history.
messages.append({"role": "assistant", "content": _complete(messages)})

# Turn 2: ask about the fact; the model answers from the shared history.
messages.append({"role": "user", "content": "What's my name?"})
print(_complete(messages))