Overview
The Chatbot module provides a Q&A interface that answers user questions using documents stored in a dedicated workspace. It combines conversational AI with RAG (Retrieval-Augmented Generation) to provide accurate, document-based answers.
You can also send context to the chatbot using a backend prompt — for example, adding variables from your data lake to provide context about the user who is chatting.
Use Case: Build a chatbot that answers questions based on your company’s knowledge base (policies, FAQs, product documentation, etc.)
Note:
The examples in this documentation use a local Flask web server. Be aware of this if you want to deploy to production.
This example was created using an HTML frontend that sends the variables to the backend.
How It Works
- User sends a message
- System builds context from conversation history
- Query is sent to the Document Search API targeting the chatbot workspace
- LLM generates answer based on relevant document content
- Conversation is stored for context in future messages
API Endpoints Used
| Endpoint | Purpose |
|---|---|
| POST /chat/document-search | Query documents and generate answers |
Step-by-Step Implementation
Step 1: Initialize the Service
import uuid
from typing import Any, Dict, List, Optional

import requests
class ChatbotService:
    """Q&A chatbot backed by the Paradigm Document Search API.

    Answers are generated via RAG against a dedicated document workspace;
    conversation history is kept in memory per conversation id.
    """

    def __init__(
        self,
        api_key: str,
        *,
        workspace_id: int = 250,
        model: str = "alfred-4.2",
        document_search_url: str = "https://paradigm.lighton.ai/api/v2/chat/document-search",
    ):
        """Initialize the service.

        Args:
            api_key: Paradigm API key used for Bearer authentication.
            workspace_id: ID of the workspace holding the chatbot documents
                (was previously hard-coded; default preserves old behavior).
            model: LLM model identifier used for answer generation.
            document_search_url: Document Search endpoint URL.
        """
        self.api_key = api_key
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        self.conversations = {}  # In-memory conversation storage (id -> message list)
        # Configuration
        self.document_search_url = document_search_url
        self.workspace_id = workspace_id
        self.model = model
Step 2: Build Conversation Context
def _build_context(
self,
message: str,
conversation_id: Optional[str],
system_prompt: Optional[str]
) -> str:
"""Build query context from conversation history."""
context = ""
# Add system prompt if provided
if system_prompt:
context += f"{system_prompt}\n\n"
# Add conversation history if exists
if conversation_id and conversation_id in self.conversations:
context += "Previous conversation:\n"
for msg in self.conversations[conversation_id]:
role = msg["role"].capitalize()
content = msg["content"]
context += f"{role}: {content}\n"
context += "\n"
return context
Step 3: Send Chat Message
def chat(
    self,
    message: str,
    conversation_id: Optional[str] = None,
    system_prompt: Optional[str] = None,
    temperature: float = 0.7
) -> Dict[str, Any]:
    """
    Send chat message and get response using document search.

    Args:
        message: User's question
        conversation_id: Optional ID for conversation continuity
        system_prompt: Optional instructions for the assistant
        temperature: Response creativity (0.0-1.0)

    Returns:
        Response with answer and conversation_id

    Raises:
        Exception: if the Document Search API returns a non-200 status.
    """
    # NOTE(review): ``temperature`` is accepted but never forwarded to the
    # API — confirm whether /chat/document-search supports it before
    # adding it to the payload.
    # Build the query: optional system prompt + prior turns + the question.
    context = self._build_context(message, conversation_id, system_prompt)
    query = f"{context}Current question: {message}"
    # Payload for the Document Search tool, scoped to the chatbot workspace.
    payload = {
        "model": self.model,
        "query": query,
        "workspace_ids": [self.workspace_id],
        "company_scope": False,
        "private_scope": False,
        "tool": "DocumentSearch"
    }
    response = requests.post(
        self.document_search_url,
        headers=self.headers,
        json=payload,
        timeout=150
    )
    if response.status_code != 200:
        raise Exception(f"Document search failed: {response.text}")
    data = response.json()
    assistant_message = data.get("answer", "")
    # Start a fresh conversation when no ID was supplied.
    if not conversation_id:
        conversation_id = str(uuid.uuid4())
    # Fix: setdefault stores history even when the caller supplies a
    # conversation_id this service has not seen before (the original code
    # silently dropped both turns in that case).
    history = self.conversations.setdefault(conversation_id, [])
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": assistant_message})
    return {
        'response': assistant_message,
        'conversation_id': conversation_id,
        'status': 'success',
        'workspace_id': self.workspace_id
    }
Step 4: Manage Conversation History
def get_conversation_history(self, conversation_id: str) -> List[Dict[str, str]]:
"""Get conversation history for a given ID."""
if conversation_id not in self.conversations:
raise ValueError(f"Conversation {conversation_id} not found")
return self.conversations[conversation_id]
def clear_conversation(self, conversation_id: str) -> bool:
"""Clear conversation history."""
if conversation_id in self.conversations:
del self.conversations[conversation_id]
return True
return False
Complete Usage Example
# Set up the service with your Paradigm API key.
bot = ChatbotService(api_key="your-api-key")

# Persona / behavior instructions, sent with every message.
system_prompt = """You are a helpful IT support assistant.
Answer questions based on the company's IT documentation.
Be concise and provide step-by-step instructions when relevant."""

# Opening question — no conversation_id yet, so a new conversation starts.
result = bot.chat(
    message="How do I reset my password?",
    system_prompt=system_prompt
)
print(f"Assistant: {result['response']}")
conversation_id = result['conversation_id']

# Second question reuses the ID so earlier turns are included as context.
result = bot.chat(
    message="What if I don't receive the reset email?",
    conversation_id=conversation_id,
    system_prompt=system_prompt
)
print(f"Assistant: {result['response']}")

# Dump the whole exchange.
for msg in bot.get_conversation_history(conversation_id):
    print(f"{msg['role'].upper()}: {msg['content']}")

# Free the in-memory history once finished.
bot.clear_conversation(conversation_id)
Flask Route Integration
from flask import Blueprint, request, jsonify

chatbot_bp = Blueprint('chatbot', __name__)

# Module-level singleton shared by all routes; populated once at app startup
# via init_chatbot_service().
chatbot_service = None

def init_chatbot_service(api_key: str):
    """Create the shared ChatbotService instance used by the routes below."""
    global chatbot_service
    chatbot_service = ChatbotService(api_key)
@chatbot_bp.route('/chat', methods=['POST'])
def chat():
    """Chat endpoint.

    Expects a JSON body with 'message' (required) and optional
    'conversation_id' and 'system_prompt'. Returns the service result,
    400 when the message is missing or the body is not JSON, and 500
    when the backend call fails.
    """
    # Fix: request.json raises on non-JSON bodies, producing a framework
    # error page; get_json(silent=True) returns None instead, so malformed
    # payloads fall through to the clean 400 below.
    data = request.get_json(silent=True) or {}
    message = data.get('message')
    conversation_id = data.get('conversation_id')
    system_prompt = data.get('system_prompt')
    if not message:
        return jsonify({'error': 'Message is required'}), 400
    try:
        result = chatbot_service.chat(
            message=message,
            conversation_id=conversation_id,
            system_prompt=system_prompt
        )
    except Exception as e:
        # Boundary handler: surface backend failures as a JSON 500.
        return jsonify({'error': str(e)}), 500
    return jsonify(result)
@chatbot_bp.route('/conversation/<conversation_id>', methods=['GET'])
def get_history(conversation_id):
    """Return the stored history for a conversation, or 404 if unknown."""
    try:
        history = chatbot_service.get_conversation_history(conversation_id)
    except ValueError as exc:
        return jsonify({'error': str(exc)}), 404
    return jsonify({'history': history})
@chatbot_bp.route('/conversation/<conversation_id>', methods=['DELETE'])
def clear_history(conversation_id):
    """Delete a conversation's history; 'cleared' reports whether it existed."""
    was_cleared = chatbot_service.clear_conversation(conversation_id)
    return jsonify({'cleared': was_cleared})
Prerequisites
Before using this module:
- Create a workspace for your chatbot documents
- Upload documents to the workspace using the Upload Session API
- Wait for embedding - documents must be fully embedded before querying
- Note the workspace_id - you’ll need it for the service configuration
Configuration
| Parameter | Description | Default |
|---|---|---|
| workspace_id | ID of the workspace containing chatbot documents | Required |
| model | LLM model to use | alfred-4.2 |
| timeout | API request timeout in seconds | 150 |
Best Practices
- Use system prompts to define the chatbot’s personality and behavior
- Store conversations persistently in production (database, Redis, etc.)
- Limit conversation history to avoid token limits (keep last 10-20 messages)
- Handle errors gracefully with user-friendly messages
- Keep documents updated in the workspace for accurate answers