"""Interactive CLI example: solve a task with memory augmentation, then review it.
This example shows the core Reflect SDK loop:
1. Augment a task with past memories
2. Solve it with an LLM
3. Review the answer (pass / fail / defer)
4. Store the trace so Reflect can learn from it
Prerequisites:
- Reflect API running locally (or set --base-url)
- REFLECT_API_KEY and REFLECT_PROJECT_ID set in the environment
- OPENAI_API_KEY set in the environment
"""
import os
import argparse
from openai import OpenAI
from reflect_sdk import ReflectClient
# Default demo task, used when --task is not supplied on the command line.
DEFAULT_TASK = "Who is Sonam Pankaj?"
def parse_args() -> argparse.Namespace:
    """Parse CLI options, falling back to environment variables for credentials.

    Raises:
        RuntimeError: if the Reflect project id or API key is missing from
            both the command line and the environment.
    """
    parser = argparse.ArgumentParser(description="Interactive Reflect SDK demo.")
    parser.add_argument("--base-url", default="http://localhost:8000", help="Reflect API base URL.")
    parser.add_argument("--project-id", default=os.getenv("REFLECT_PROJECT_ID"), help="Reflect project id.")
    parser.add_argument("--reflect-api-key", default=os.getenv("REFLECT_API_KEY"), help="Reflect API key.")
    parser.add_argument("--model", default="gpt-5.4-mini", help="OpenAI model to use.")
    parser.add_argument("--task", default=DEFAULT_TASK, help="The task for the model to solve.")
    parser.add_argument("--limit", type=int, default=3, help="Max number of memories to retrieve.")
    parsed = parser.parse_args()

    # Credentials are required; fail fast with a clear message instead of a
    # confusing error deep inside the client.
    required = (
        ("project_id", "Set REFLECT_PROJECT_ID or pass --project-id."),
        ("reflect_api_key", "Set REFLECT_API_KEY or pass --reflect-api-key."),
    )
    for attribute, message in required:
        if not getattr(parsed, attribute):
            raise RuntimeError(message)
    return parsed
def ask_for_review() -> tuple[str, str | None]:
"""Prompt the user to pass, fail, or defer the review.
Deferring stores the trace without a review - useful when you want to
review in bulk later via the dashboard or the API.
"""
while True:
choice = input("Was this answer correct? [y/n/d (defer)]: ").strip().lower()
if choice in {"y", "yes"}:
return "pass", None
if choice in {"n", "no"}:
feedback = input("What was wrong? (used as learning feedback): ").strip()
return "fail", feedback or "The answer was incorrect."
if choice in {"d", "defer"}:
return "defer", None
print("Please enter 'y', 'n', or 'd'.")
def main() -> None:
    """Run the demo loop: augment the task, solve it, review, store a trace."""
    args = parse_args()
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        raise RuntimeError("OPENAI_API_KEY must be set.")

    # Step 1: connect to Reflect.
    reflect = ReflectClient(
        base_url=args.base_url,
        api_key=args.reflect_api_key,
        project_id=args.project_id,
    )

    # Step 2: augment the task with relevant memories — Reflect retrieves
    # past traces and injects their insights into the prompt.
    augmented = reflect.augment_with_memories(task=args.task, limit=args.limit)
    print(f"Task: {args.task}")
    print(f"Retrieved {len(augmented.memories)} relevant memories.\n")

    # Step 3: solve the augmented task with an LLM.
    system_prompt = (
        "Solve the user's task. Use any relevant memories included in the prompt. "
        "Respond concisely."
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": augmented.augmented_task},
    ]
    client = OpenAI(api_key=openai_api_key)
    completion = client.chat.completions.create(model=args.model, messages=messages)
    answer = (completion.choices[0].message.content or "").strip()
    print("Model answer:")
    print(answer)
    print()

    # Step 4: collect a human review of the answer.
    review_result, feedback = ask_for_review()

    # Step 5: store the trace so Reflect can learn from it. A "pass"/"fail"
    # review makes Reflect immediately generate a reflection and update
    # utility scores for memory ranking; deferring (review_result=None on the
    # API call) stores the trace without a review so it can be reviewed later
    # via the dashboard or the API.
    trace = reflect.create_trace_and_wait(
        task=args.task,
        trajectory=messages + [{"role": "assistant", "content": answer}],
        retrieved_memory_ids=[memory.id for memory in augmented.memories],
        model=args.model,
        review_result=None if review_result == "defer" else review_result,
        feedback_text=feedback,
    )
    print(f"\nResult: {review_result}")
    if feedback:
        print(f"Feedback: {feedback}")
    print(f"Trace id: {trace.id}")
    print(f"Review: {trace.review_status}")
    if trace.created_memory_id:
        print(f"Memory created: {trace.created_memory_id}")
# Script entry point; importing this module has no side effects.
if __name__ == "__main__":
    main()