Overview
Create a basic chatbot that maintains conversation history and responds to user messages.

Python Implementation
Copy
from openai import OpenAI
import sys
# OpenAI-compatible client pointed at the SaveGate gateway.
# NOTE(review): avoid committing a real API key; load it from an
# environment variable or secrets store instead of hardcoding it.
client = OpenAI(
    api_key="sk-savegate-xxxxxxxxxxxxx",
    base_url="https://api.savegate.ai/v1"
)
def chat_bot():
    """Run an interactive console chatbot that keeps the full message history.

    Loops on user input until 'quit'/'exit'/'bye'; every turn (user and
    assistant) is appended to the history so the model sees prior context.
    """
    history = [
        {"role": "system", "content": "You are a helpful and friendly assistant."}
    ]
    print("Chatbot started! Type 'quit' to exit.\n")
    while True:
        text = input("You: ").strip()
        if text.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break
        if not text:
            # Ignore empty lines and re-prompt.
            continue
        # Record the user's turn before calling the API.
        history.append({"role": "user", "content": text})
        try:
            reply = client.chat.completions.create(
                model="gpt-4",
                messages=history,
                temperature=0.7
            ).choices[0].message.content
            # Record the assistant's turn so context accumulates.
            history.append({"role": "assistant", "content": reply})
            print(f"Assistant: {reply}\n")
        except Exception as e:
            print(f"Error: {e}\n")


if __name__ == "__main__":
    chat_bot()
Node.js Implementation
Copy
import OpenAI from 'openai';
import * as readline from 'readline';
// OpenAI-compatible client pointed at the SaveGate gateway.
// NOTE(review): do not hardcode real API keys; read from process.env instead.
const client = new OpenAI({
    apiKey: 'sk-savegate-xxxxxxxxxxxxx',
    baseURL: 'https://api.savegate.ai/v1'
});
/**
 * Interactive console chatbot that keeps the full message history.
 * Uses a recursive readline prompt: each answered question schedules
 * the next one until the user types quit/exit/bye.
 */
async function chatBot() {
    // Full message history, seeded with the system prompt.
    const conversation = [
        { role: 'system', content: 'You are a helpful and friendly assistant.' }
    ];
    const rl = readline.createInterface({
        input: process.stdin,
        output: process.stdout
    });
    console.log("Chatbot started! Type 'quit' to exit.\n");
    const askQuestion = () => {
        rl.question('You: ', async (input) => {
            const userInput = input.trim();
            if (['quit', 'exit', 'bye'].includes(userInput.toLowerCase())) {
                console.log('Goodbye!');
                rl.close();
                return;
            }
            if (!userInput) {
                // Ignore empty lines and re-prompt.
                askQuestion();
                return;
            }
            // Add user message
            conversation.push({ role: 'user', content: userInput });
            try {
                // Get response
                const response = await client.chat.completions.create({
                    model: 'gpt-4',
                    messages: conversation,
                    temperature: 0.7
                });
                const assistantMessage = response.choices[0].message.content;
                // Add assistant response
                conversation.push({ role: 'assistant', content: assistantMessage });
                console.log(`Assistant: ${assistantMessage}\n`);
            } catch (error) {
                console.error(`Error: ${error.message}\n`);
            }
            // Prompt again whether the call succeeded or failed.
            askQuestion();
        });
    };
    askQuestion();
}
chatBot();
Enhanced Features
Streaming Responses
Add real-time streaming for better UX:

Copy
def chat_bot_streaming():
    """Interactive chatbot that streams tokens to the console as they arrive.

    Like chat_bot(), but uses stream=True and prints each content delta
    immediately; the accumulated text is appended to the history afterwards.
    """
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    print("Chatbot with streaming started!\n")
    while True:
        user_input = input("You: ").strip()
        if user_input.lower() in ['quit', 'exit']:
            break
        if not user_input:
            # Skip empty submissions (consistent with chat_bot).
            continue
        conversation.append({"role": "user", "content": user_input})
        print("Assistant: ", end="", flush=True)
        full_response = ""
        try:
            stream = client.chat.completions.create(
                model="gpt-4",
                messages=conversation,
                temperature=0.7,
                stream=True
            )
            for chunk in stream:
                # Deltas may be None (e.g. role-only first chunk); skip those.
                if chunk.choices[0].delta.content:
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    full_response += content
            print("\n")
            conversation.append({"role": "assistant", "content": full_response})
        except Exception as e:
            # Fix: an API/network error previously crashed the whole loop.
            # Drop the unanswered user turn so history stays consistent.
            conversation.pop()
            print(f"\nError: {e}\n")
Multi-Model Support
Let users choose the model:

Copy
# Menu of selectable models: key = menu number entered by the user,
# value = display name plus the model id sent to the API.
MODELS = {
    "1": {"name": "GPT-4", "id": "gpt-4"},
    "2": {"name": "Claude 3.5 Sonnet", "id": "claude-3-5-sonnet-20241022"},
    "3": {"name": "GPT-3.5 (Fast)", "id": "gpt-3.5-turbo"},
}
def select_model():
    """Prompt for a model choice and return its API id.

    Unknown or empty input falls back to option "1" (GPT-4).
    """
    print("\nSelect a model:")
    for key, info in MODELS.items():
        print(f"{key}. {info['name']}")
    choice = input("\nEnter number: ").strip()
    selected = MODELS.get(choice, MODELS["1"])
    return selected["id"]
def chat_bot_multi_model():
    """Chat loop that lets the user pick the model and 'switch' mid-session.

    Commands: 'switch' re-runs model selection; 'quit'/'exit' ends the loop.
    """
    model = select_model()
    print(f"\nUsing model: {model}\n")
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    while True:
        user_input = input("You: ").strip()
        if user_input.lower() == 'switch':
            model = select_model()
            print(f"\nSwitched to: {model}\n")
            continue
        if user_input.lower() in ['quit', 'exit']:
            break
        if not user_input:
            # Skip empty submissions (consistent with chat_bot).
            continue
        conversation.append({"role": "user", "content": user_input})
        try:
            response = client.chat.completions.create(
                model=model,
                messages=conversation,
                temperature=0.7
            )
        except Exception as e:
            # Fix: an API/network error previously crashed the whole loop
            # (chat_bot already handles this case; this makes the two match).
            conversation.pop()  # drop the unanswered user turn
            print(f"Error: {e}\n")
            continue
        assistant_message = response.choices[0].message.content
        conversation.append({"role": "assistant", "content": assistant_message})
        print(f"Assistant: {assistant_message}\n")
Save Conversation History
Copy
import json
from datetime import datetime
def save_conversation(conversation, filename=None):
    """Persist a message list to a JSON file.

    Args:
        conversation: list of {"role": ..., "content": ...} message dicts.
        filename: target path; defaults to a timestamped "chat_*.json"
            in the current directory.
    """
    if filename is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"chat_{timestamp}.json"
    with open(filename, 'w') as f:
        json.dump({
            'timestamp': datetime.now().isoformat(),
            'messages': conversation
        }, f, indent=2)
    # Bug fix: the original printed the literal text "(unknown)" instead of
    # interpolating the actual filename.
    print(f"\nConversation saved to {filename}")
def load_conversation(filename):
    """Read a chat file written by save_conversation and return its messages."""
    with open(filename, 'r') as fh:
        saved = json.load(fh)
    return saved['messages']
# Usage in chatbot
def chat_bot_with_save():
    """Chat loop supporting a 'save' command and an optional save on exit.

    Snippet: the request/response logic is deliberately elided below.
    """
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    while True:
        user_input = input("You: ").strip()
        if user_input.lower() == 'save':
            # Save mid-conversation without ending the session.
            save_conversation(conversation)
            continue
        if user_input.lower() in ['quit', 'exit']:
            save = input("Save conversation? (y/n): ")
            if save.lower() == 'y':
                save_conversation(conversation)
            break
        # ... rest of chat logic
Web Interface
Create a simple web chat using Flask:

Copy
from flask import Flask, render_template, request, jsonify
from openai import OpenAI
app = Flask(__name__)

# OpenAI-compatible client pointed at the SaveGate gateway.
# NOTE(review): load the API key from configuration/env rather than hardcoding.
client = OpenAI(
    api_key="sk-savegate-xxxxxxxxxxxxx",
    base_url="https://api.savegate.ai/v1"
)

# In-memory per-session histories keyed by session_id.
# NOTE(review): grows without bound and is lost on restart — demo use only.
conversations = {}
@app.route('/')
def home():
    """Serve the chat UI template."""
    return render_template('chat.html')
@app.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn for a session.

    Expects JSON {"session_id": optional str, "message": str}.
    Returns JSON {"response": str} on success, or {"error": str} with an
    appropriate HTTP status on bad input / upstream failure.
    """
    data = request.json or {}
    session_id = data.get('session_id', 'default')
    message = data.get('message')
    if not message:
        # Fix: a missing message was previously forwarded as None and
        # surfaced as an opaque upstream error.
        return jsonify({"error": "message is required"}), 400
    # Initialize conversation for new sessions
    if session_id not in conversations:
        conversations[session_id] = [
            {"role": "system", "content": "You are a helpful assistant."}
        ]
    # Add user message
    conversations[session_id].append({"role": "user", "content": message})
    try:
        # Get response
        response = client.chat.completions.create(
            model="gpt-4",
            messages=conversations[session_id],
            temperature=0.7
        )
    except Exception as e:
        # Fix: upstream failures previously crashed the request handler.
        # Drop the unanswered user turn so history stays consistent.
        conversations[session_id].pop()
        return jsonify({"error": str(e)}), 502
    assistant_message = response.choices[0].message.content
    # Add assistant response
    conversations[session_id].append({"role": "assistant", "content": assistant_message})
    return jsonify({"response": assistant_message})
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger — local
    # development only; never enable it in production.
    app.run(debug=True)
Next Steps
Learn how to implement streaming for real-time responses