# knowledge-base/.env.example
# API Configuration - UPDATE THESE WITH YOUR VALUES
OPENAI_API_KEY=sk-your-openai-api-key-here
OPENAI_COMPAT_API_KEY=sk-your-openai-compatible-api-key-here
OPENAI_BASE_URL=https://your-openai-compatible-endpoint.com/v1
EMBEDDER_API_KEY=AIzaSy-your-google-gemini-api-key-here
# Database Configuration
QDRANT_HOST=qdrant
QDRANT_PORT=6333
QDRANT_COLLECTION_NAME=mem0
# Neo4j Configuration
NEO4J_AUTH=neo4j/mem0_neo4j_password
NEO4J_URI=bolt://neo4j:7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=mem0_neo4j_password
# Application Configuration
BACKEND_PORT=8000
FRONTEND_PORT=3000
LOG_LEVEL=INFO
CORS_ORIGINS=http://localhost:3000
# Model Configuration - Intelligent Routing
# These models are automatically selected based on task complexity
# General purpose model
DEFAULT_MODEL=claude-sonnet-4
# Memory extraction and processing
EXTRACTION_MODEL=claude-sonnet-4
# Simple/quick responses
FAST_MODEL=o4-mini
# Analysis and comparison tasks
ANALYTICAL_MODEL=gemini-2.5-pro
# Complex reasoning tasks
REASONING_MODEL=claude-sonnet-4
# Expert-level comprehensive analysis
EXPERT_MODEL=o3
# IMPORTANT NOTES:
# - Ensure all models are available on your OpenAI-compatible endpoint
# - Verify model availability (OPENAI_BASE_URL already includes /v1):
#   curl -H "Authorization: Bearer $OPENAI_COMPAT_API_KEY" $OPENAI_BASE_URL/models
# - Neo4j must be version 5.18+ for vector.similarity.cosine() function
# - Qdrant vector database for embeddings storage (replaces PostgreSQL+pgvector)
# - Ollama must be running locally with nomic-embed-text:latest model