
Code Examples


Python — Basic usage

# Basic synchronous usage of the Atlas memory client.
from atlas_memory import CognitiveBrain

# Every call is scoped to one user, and optionally one session.
brain = CognitiveBrain(
    api_key="atlas_your_key_here",
    base_url="https://api.atlas.bsyncs.com",
    user_id="user-123",
    session_id="session-abc",    # optional
)

# Ingest a statement into long-term memory.
ingest = brain.add("The architecture review is scheduled for Friday.")
print(f"Stored {ingest.facts_ingested} facts in {ingest.latency_ms:.0f}ms")

# Retrieve the top-k facts matching a natural-language query.
hits = brain.search("When is the architecture review?", k=5)
print(hits.context)

# Multi-hop question answering over the knowledge graph.
reply = brain.ask("Who is responsible for the architecture review?")
print(reply)

Python — Async

import asyncio

from atlas_memory import AsyncCognitiveBrain

async def main():
    """Ingest one fact with the async client, then retrieve it.

    The ``async with`` block presumably manages the client's underlying
    connection lifecycle — confirm against the SDK docs.
    """
    async with AsyncCognitiveBrain(
        api_key="atlas_...",
        user_id="user-123"
    ) as brain:
        await brain.add("Atlas uses Neo4j for the knowledge graph.")
        result = await brain.search("What graph database does Atlas use?")
        print(result.format())

# Fix: the original snippet defined main() but never executed it — a
# coroutine does nothing until an event loop drives it.
if __name__ == "__main__":
    asyncio.run(main())

OpenAI function calling

# OpenAI function-calling integration: expose Atlas memory operations as
# tools and route the model's tool calls back through the brain.
from atlas_memory import CognitiveBrain
from openai import OpenAI
import json

brain = CognitiveBrain(api_key="atlas_...", user_id="user-123")
client = OpenAI()

# Fix: `user_message` was referenced but never defined in the original
# snippet, so it raised NameError when run verbatim.
user_message = "What do you remember about our database choices?"

messages = [{"role": "user", "content": user_message}]

response = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=brain.get_openai_tools(),   # Atlas memory ops as OpenAI tool specs
    tool_choice="auto",
)

# Fix: the assistant message that carries the tool_calls must be appended
# before the "tool" role results, or a follow-up completions call will
# reject the conversation (OpenAI tool-calling protocol).
messages.append(response.choices[0].message)

# Execute each requested tool call against the brain and feed the result
# back into the conversation.
for tc in response.choices[0].message.tool_calls or []:
    result = brain.handle_tool_call(
        tc.function.name,
        json.loads(tc.function.arguments)
    )
    messages.append({
        "role": "tool",
        "tool_call_id": tc.id,
        # NOTE(review): assumes handle_tool_call returns a string; if it
        # returns a dict, wrap in json.dumps() — confirm in the SDK docs.
        "content": result
    })

LangChain

# LangChain integration: wire Atlas memory operations in as agent tools.
from atlas_memory import CognitiveBrain
from langchain_openai import ChatOpenAI
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate

brain = CognitiveBrain(api_key="atlas_...", user_id="user-123")
memory_tools = brain.get_langchain_tools()

chat_model = ChatOpenAI(model="gpt-4o")

# Standard tools-agent prompt: system instruction, prior history,
# the user turn, and the scratchpad slot the agent writes into.
agent_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant with long-term memory."),
    ("placeholder", "{chat_history}"),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])

memory_agent = create_openai_tools_agent(chat_model, memory_tools, agent_prompt)
runner = AgentExecutor(agent=memory_agent, tools=memory_tools)
runner.invoke({"input": "What do you remember about our database choices?"})

CrewAI

# CrewAI integration: give an agent Atlas-backed memory tools.
from atlas_memory import CognitiveBrain
from crewai import Agent, Task, Crew

brain = CognitiveBrain(api_key="atlas_...", user_id="crew-agent")
crew_tools = brain.get_crewai_tools()

# NOTE(review): Task and Crew are imported but only the agent is built
# here — presumably the task/crew wiring follows in a fuller example.
researcher = Agent(
    goal="Gather and remember information about the project",
    role="Research Analyst",
    tools=crew_tools,
    verbose=True,
)

REST — cURL

# Ingest
curl -X POST https://api.atlas.bsyncs.com/brain/ingest \
  -H "X-API-Key: atlas_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{"text": "The payments service is owned by Alice.", "user_id": "user-123"}'

# Retrieve
curl -X POST https://api.atlas.bsyncs.com/brain/retrieve \
  -H "X-API-Key: atlas_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{"query": "Who owns payments?", "user_id": "user-123", "k": 5}'

# Graph QA
curl -X POST https://api.atlas.bsyncs.com/brain/retrieve/graph-qa \
  -H "X-API-Key: atlas_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{"query": "What services does Alice own?", "user_id": "user-123"}'