Create AI Assistant

Build powerful, customizable AI assistants with specialized knowledge, unique personalities, and advanced capabilities. Perfect for customer support, personal productivity, education, and business automation.

Overview

AI assistants can be customized for:
  • Specialized knowledge - Domain expertise in specific fields
  • Custom personalities - Unique voice, tone, and communication style
  • Advanced capabilities - Function calling, web search, file processing
  • Role-specific behavior - Tailored responses for different use cases
  • Memory and context - Persistent conversations and learning

Custom Knowledge

Ground responses in specific documents, data, and domain expertise

Function Calling

Integrate with APIs, databases, and external tools

Personality Design

Create unique voice, tone, and communication style

Multi-Modal

Handle text, images, audio, and documents

Quick Start

import requests
import json
from datetime import datetime

class AIAssistant:
    def __init__(self, api_key, assistant_config=None):
        self.api_key = api_key
        # Merge any provided config over the defaults so partial configs don't raise KeyError later
        self.config = {**self.get_default_config(), **(assistant_config or {})}
        self.conversation_history = []
        self.memory = {}
        
    def get_default_config(self):
        """Default assistant configuration"""
        return {
            "name": "Assistant",
            "role": "helpful AI assistant",
            "personality": "friendly, professional, and knowledgeable",
            "expertise": [],
            "capabilities": ["conversation", "analysis", "problem_solving"],
            "response_style": "clear and concise",
            "model": "gpt-4o"
        }
    
    def chat(self, user_message, context=None):
        """Have a conversation with the assistant"""
        
        # Build system prompt from configuration
        system_prompt = self.build_system_prompt()
        
        # Add context if provided
        if context:
            user_message = f"Context: {context}\n\nUser: {user_message}"
        
        # Build conversation with history
        messages = [{"role": "system", "content": system_prompt}]
        
        # Add conversation history (last 10 exchanges)
        messages.extend(self.conversation_history[-20:])
        
        # Add current message
        messages.append({"role": "user", "content": user_message})
        
        response = requests.post(
            "https://api.anyapi.ai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "model": self.config["model"],
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 1000
            }
        )
        
        assistant_response = response.json()["choices"][0]["message"]["content"]
        
        # Update conversation history
        self.conversation_history.extend([
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": assistant_response}
        ])
        
        return assistant_response
    
    def build_system_prompt(self):
        """Build system prompt from configuration"""
        
        prompt = f"""You are {self.config['name']}, a {self.config['role']}.

Personality: {self.config['personality']}
Response Style: {self.config['response_style']}"""
        
        if self.config['expertise']:
            prompt += f"\n\nExpertise: You are knowledgeable in {', '.join(self.config['expertise'])}"
        
        if self.config['capabilities']:
            prompt += f"\nCapabilities: {', '.join(self.config['capabilities'])}"
        
        prompt += "\n\nAlways maintain your personality and provide helpful, accurate responses."
        
        return prompt
    
    def update_personality(self, personality_traits):
        """Update assistant personality"""
        self.config.update(personality_traits)
        
    def add_expertise(self, domain, knowledge_base=None):
        """Add domain expertise to the assistant"""
        if domain not in self.config['expertise']:
            self.config['expertise'].append(domain)
        
        if knowledge_base:
            self.memory[f"expertise_{domain}"] = knowledge_base
    
    def set_context_memory(self, key, value):
        """Set persistent memory for the assistant"""
        self.memory[key] = value
    
    def get_context_memory(self, key):
        """Retrieve from persistent memory"""
        return self.memory.get(key)

# Usage examples

# Create a basic assistant
assistant = AIAssistant("YOUR_API_KEY")

response = assistant.chat("Hello! Can you help me with Python programming?")
print(f"Assistant: {response}")

# Create a specialized customer support assistant
support_config = {
    "name": "SupportBot",
    "role": "customer support specialist",
    "personality": "patient, empathetic, and solution-focused",
    "expertise": ["product knowledge", "troubleshooting", "customer service"],
    "response_style": "helpful and professional with step-by-step guidance",
    "model": "gpt-4o"
}

support_assistant = AIAssistant("YOUR_API_KEY", support_config)

response = support_assistant.chat("I'm having trouble with my order")
print(f"Support: {response}")

# Create a technical assistant with expertise
tech_config = {
    "name": "TechAdvisor", 
    "role": "senior software engineer and architect",
    "personality": "analytical, precise, and thorough",
    "expertise": ["software architecture", "system design", "best practices"],
    "response_style": "detailed technical explanations with examples",
    "model": "gpt-4o"
}

tech_assistant = AIAssistant("YOUR_API_KEY", tech_config)

response = tech_assistant.chat("How should I design a scalable microservices architecture?")
print(f"Tech Advisor: {response}")

Advanced Assistant Features

Function-Enabled Assistant

class FunctionEnabledAssistant(AIAssistant):
    def __init__(self, api_key, assistant_config=None):
        super().__init__(api_key, assistant_config)
        self.available_functions = {}
        self.setup_default_functions()
    
    def setup_default_functions(self):
        """Setup default function capabilities"""
        
        self.available_functions = {
            "get_current_time": {
                "function": self.get_current_time,
                "description": "Get the current date and time",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "timezone": {
                            "type": "string",
                            "description": "Timezone (optional, defaults to UTC)"
                        }
                    }
                }
            },
            "search_web": {
                "function": self.search_web,
                "description": "Search the web for current information",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "Search query"
                        },
                        "num_results": {
                            "type": "integer",
                            "description": "Number of results to return",
                            "default": 5
                        }
                    },
                    "required": ["query"]
                }
            },
            "calculate": {
                "function": self.calculate,
                "description": "Perform mathematical calculations",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "expression": {
                            "type": "string",
                            "description": "Mathematical expression to evaluate"
                        }
                    },
                    "required": ["expression"]
                }
            },
            "send_email": {
                "function": self.send_email,
                "description": "Send an email",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "to": {"type": "string", "description": "Recipient email"},
                        "subject": {"type": "string", "description": "Email subject"},
                        "body": {"type": "string", "description": "Email body"}
                    },
                    "required": ["to", "subject", "body"]
                }
            }
        }
    
    def add_custom_function(self, name, function, description, parameters):
        """Add a custom function to the assistant"""
        
        self.available_functions[name] = {
            "function": function,
            "description": description,
            "parameters": parameters
        }
    
    def chat_with_functions(self, user_message, context=None):
        """Chat with function calling capabilities"""
        
        system_prompt = self.build_system_prompt()
        system_prompt += "\n\nYou have access to functions. Use them when appropriate to help the user."
        
        if context:
            user_message = f"Context: {context}\n\nUser: {user_message}"
        
        # Prepare function definitions for the API
        functions = []
        for name, func_data in self.available_functions.items():
            functions.append({
                "name": name,
                "description": func_data["description"],
                "parameters": func_data["parameters"]
            })
        
        messages = [{"role": "system", "content": system_prompt}]
        messages.extend(self.conversation_history[-20:])
        messages.append({"role": "user", "content": user_message})
        
        response = requests.post(
            "https://api.anyapi.ai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "model": self.config["model"],
                "messages": messages,
                "functions": functions,
                "function_call": "auto",
                "temperature": 0.7
            }
        )
        
        response_data = response.json()
        message = response_data["choices"][0]["message"]
        
        # Handle function calls
        if message.get("function_call"):
            function_name = message["function_call"]["name"]
            function_args = json.loads(message["function_call"]["arguments"])
            
            # Execute the function
            if function_name in self.available_functions:
                function_result = self.available_functions[function_name]["function"](**function_args)
                
                # Continue conversation with function result
                messages.append(message)
                messages.append({
                    "role": "function",
                    "name": function_name,
                    "content": json.dumps(function_result)
                })
                
                # Get final response
                final_response = requests.post(
                    "https://api.anyapi.ai/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {self.api_key}",
                        "Content-Type": "application/json"
                    },
                    json={
                        "model": self.config["model"],
                        "messages": messages,
                        "temperature": 0.7
                    }
                )
                
                assistant_response = final_response.json()["choices"][0]["message"]["content"]
                
                # Update history
                self.conversation_history.extend([
                    {"role": "user", "content": user_message},
                    {"role": "assistant", "content": assistant_response}
                ])
                
                return assistant_response
        
        # No function call needed
        assistant_response = message["content"]
        self.conversation_history.extend([
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": assistant_response}
        ])
        
        return assistant_response
    
    def get_current_time(self, timezone="UTC"):
        """Function to get current time"""
        import pytz  # third-party dependency: pip install pytz
        
        try:
            tz = pytz.timezone(timezone)
            current_time = datetime.now(tz)
            return {
                "current_time": current_time.strftime("%Y-%m-%d %H:%M:%S %Z"),
                "timezone": timezone
            }
        except pytz.UnknownTimeZoneError:
            # Fall back to UTC if the timezone name isn't recognized
            utc_time = datetime.now(pytz.utc)
            return {
                "current_time": utc_time.strftime("%Y-%m-%d %H:%M:%S UTC"),
                "timezone": "UTC"
            }
    
    def search_web(self, query, num_results=5):
        """Function to search the web"""
        # Implement web search (placeholder)
        return {
            "query": query,
            "results": [
                {
                    "title": f"Search result for: {query}",
                    "url": "https://example.com",
                    "snippet": "This is a sample search result."
                }
            ],
            "note": "This is a placeholder. Integrate with actual search API."
        }
    
    def calculate(self, expression):
        """Function to perform calculations"""
        try:
            # Safe evaluation of mathematical expressions
            import ast
            import operator
            
            # Supported operations
            ops = {
                ast.Add: operator.add,
                ast.Sub: operator.sub,
                ast.Mult: operator.mul,
                ast.Div: operator.truediv,
                ast.Pow: operator.pow,
                ast.USub: operator.neg,
            }
            
            def eval_expr(node):
                if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                    return node.value
                elif isinstance(node, ast.BinOp):
                    return ops[type(node.op)](eval_expr(node.left), eval_expr(node.right))
                elif isinstance(node, ast.UnaryOp):
                    return ops[type(node.op)](eval_expr(node.operand))
                else:
                    raise TypeError(node)
            
            result = eval_expr(ast.parse(expression, mode='eval').body)
            return {
                "expression": expression,
                "result": result
            }
        except Exception as e:
            return {
                "expression": expression,
                "error": f"Calculation error: {str(e)}"
            }
    
    def send_email(self, to, subject, body):
        """Function to send email (placeholder)"""
        # Implement email sending (placeholder)
        return {
            "status": "success",
            "message": f"Email sent to {to}",
            "note": "This is a placeholder. Integrate with actual email service."
        }

# Usage
function_assistant = FunctionEnabledAssistant("YOUR_API_KEY", {
    "name": "FunctionBot",
    "role": "multi-capability assistant with tools",
    "personality": "helpful and resourceful",
    "capabilities": ["conversation", "web search", "calculations", "email"],
    "model": "gpt-4o"
})

# Example usage
response = function_assistant.chat_with_functions("What time is it in Tokyo?")
print(f"Assistant: {response}")

response = function_assistant.chat_with_functions("Calculate 15% tip on a $67.50 bill")
print(f"Assistant: {response}")

Knowledge-Based Assistant

class KnowledgeAssistant(AIAssistant):
    def __init__(self, api_key, assistant_config=None):
        super().__init__(api_key, assistant_config)
        self.knowledge_base = {}
        self.document_embeddings = {}
    
    def add_knowledge_from_text(self, knowledge_key, text_content, source=None):
        """Add knowledge from text content"""
        
        # Split text into chunks for better processing
        chunks = self.chunk_text(text_content, chunk_size=500)
        
        # Generate embeddings for each chunk
        embeddings = []
        for chunk in chunks:
            embedding = self.generate_embedding(chunk)
            embeddings.append({
                "text": chunk,
                "embedding": embedding,
                "source": source
            })
        
        self.knowledge_base[knowledge_key] = {
            "content": text_content,
            "chunks": embeddings,
            "source": source,
            "added_at": datetime.now().isoformat()
        }
    
    def add_knowledge_from_file(self, knowledge_key, file_path):
        """Add knowledge from a file"""
        
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
            
            self.add_knowledge_from_text(knowledge_key, content, source=file_path)
            return f"Knowledge added from {file_path}"
        
        except Exception as e:
            return f"Error reading file: {str(e)}"
    
    def search_knowledge(self, query, top_k=3):
        """Search through knowledge base for relevant information"""
        
        query_embedding = self.generate_embedding(query)
        
        # Search through all knowledge chunks
        results = []
        for knowledge_key, knowledge_data in self.knowledge_base.items():
            for chunk_data in knowledge_data["chunks"]:
                similarity = self.cosine_similarity(query_embedding, chunk_data["embedding"])
                
                if similarity > 0.3:  # Minimum relevance threshold
                    results.append({
                        "knowledge_key": knowledge_key,
                        "text": chunk_data["text"],
                        "similarity": similarity,
                        "source": chunk_data["source"]
                    })
        
        # Sort by similarity and return top results
        results.sort(key=lambda x: x["similarity"], reverse=True)
        return results[:top_k]
    
    def chat_with_knowledge(self, user_message, context=None):
        """Chat using knowledge base for enhanced responses"""
        
        # Search knowledge base for relevant information
        relevant_knowledge = self.search_knowledge(user_message)
        
        # Build enhanced system prompt with knowledge
        system_prompt = self.build_system_prompt()
        
        if relevant_knowledge:
            knowledge_context = "\n\n".join([
                f"Knowledge from {item['knowledge_key']}: {item['text']}"
                for item in relevant_knowledge
            ])
            
            system_prompt += f"\n\nRelevant Knowledge:\n{knowledge_context}\n\nUse this knowledge to provide accurate, informed responses."
        
        if context:
            user_message = f"Context: {context}\n\nUser: {user_message}"
        
        messages = [{"role": "system", "content": system_prompt}]
        messages.extend(self.conversation_history[-20:])
        messages.append({"role": "user", "content": user_message})
        
        response = requests.post(
            "https://api.anyapi.ai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "model": self.config["model"],
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 1000
            }
        )
        
        assistant_response = response.json()["choices"][0]["message"]["content"]
        
        # Update conversation history
        self.conversation_history.extend([
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": assistant_response}
        ])
        
        return {
            "response": assistant_response,
            "knowledge_used": [item["knowledge_key"] for item in relevant_knowledge],
            "sources": [item["source"] for item in relevant_knowledge if item["source"]]
        }
    
    def generate_embedding(self, text):
        """Generate embedding for text"""
        
        response = requests.post(
            "https://api.anyapi.ai/v1/embeddings",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "model": "text-embedding-3-large",
                "input": text
            }
        )
        
        return response.json()["data"][0]["embedding"]
    
    def chunk_text(self, text, chunk_size=500, overlap=50):
        """Split text into overlapping chunks"""
        
        words = text.split()
        chunks = []
        
        for i in range(0, len(words), chunk_size - overlap):
            chunk_words = words[i:i + chunk_size]
            chunk_text = " ".join(chunk_words)
            chunks.append(chunk_text)
            
            if i + chunk_size >= len(words):
                break
        
        return chunks
    
    def cosine_similarity(self, vec1, vec2):
        """Calculate cosine similarity between two vectors"""
        
        import numpy as np
        
        vec1 = np.array(vec1)
        vec2 = np.array(vec2)
        
        dot_product = np.dot(vec1, vec2)
        magnitude1 = np.linalg.norm(vec1)
        magnitude2 = np.linalg.norm(vec2)
        
        if magnitude1 == 0 or magnitude2 == 0:
            return 0
        
        return dot_product / (magnitude1 * magnitude2)
    
    def list_knowledge(self):
        """List all knowledge in the base"""
        
        knowledge_summary = []
        for key, data in self.knowledge_base.items():
            knowledge_summary.append({
                "key": key,
                "source": data["source"],
                "chunk_count": len(data["chunks"]),
                "added_at": data["added_at"]
            })
        
        return knowledge_summary

# Usage
knowledge_assistant = KnowledgeAssistant("YOUR_API_KEY", {
    "name": "KnowledgeBot",
    "role": "knowledge-enhanced assistant",
    "personality": "scholarly, accurate, and helpful",
    "capabilities": ["conversation", "knowledge search", "document analysis"],
    "model": "gpt-4o"
})

# Add knowledge
knowledge_assistant.add_knowledge_from_text(
    "company_policies",
    """
    Our company offers flexible work arrangements including remote work options.
    Employees can work from home up to 3 days per week with manager approval.
    We provide comprehensive health insurance including dental and vision coverage.
    Vacation policy allows for 20 days of PTO per year, increasing with tenure.
    """,
    source="Employee Handbook 2024"
)

# Chat with knowledge
result = knowledge_assistant.chat_with_knowledge(
    "What are the work from home policies?"
)

print(f"Response: {result['response']}")
print(f"Knowledge used: {result['knowledge_used']}")
print(f"Sources: {result['sources']}")

Multi-Modal Assistant

class MultiModalAssistant(AIAssistant):
    def __init__(self, api_key, assistant_config=None):
        super().__init__(api_key, assistant_config)
        self.supported_modes = ["text", "image", "audio", "document"]
    
    def process_image(self, image_path, user_question=None):
        """Process and analyze images"""
        
        import base64
        
        # Encode image to base64
        with open(image_path, "rb") as image_file:
            image_base64 = base64.b64encode(image_file.read()).decode('utf-8')
        
        prompt = user_question or "Analyze this image and describe what you see in detail."
        
        response = requests.post(
            "https://api.anyapi.ai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            json={
                "model": "gpt-4o",
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{image_base64}"
                                }
                            }
                        ]
                    }
                ],
                "max_tokens": 1000
            }
        )
        
        return response.json()["choices"][0]["message"]["content"]
    
    def process_audio(self, audio_path, task="transcription"):
        """Process audio files - transcription or analysis"""
        
        with open(audio_path, "rb") as audio_file:
            if task == "transcription":
                response = requests.post(
                    "https://api.anyapi.ai/v1/audio/transcriptions",
                    headers={
                        "Authorization": f"Bearer {self.api_key}"
                    },
                    files={
                        "file": audio_file,
                        "model": (None, "whisper-1"),
                        "response_format": (None, "json")
                    }
                )
                
                transcription = response.json()["text"]
                
                # Analyze the transcription
                analysis = self.chat(f"Analyze this transcription: {transcription}")
                
                return {
                    "transcription": transcription,
                    "analysis": analysis
                }
            
            # Only transcription is currently supported
            return {"error": f"Unsupported audio task: {task}"}
    
    def process_document(self, document_path, question=None):
        """Process documents (PDF, DOCX, TXT)"""
        
        # Extract text from document
        document_text = self.extract_document_text(document_path)
        
        if question:
            response = self.chat(f"Based on this document: {document_text}\n\nQuestion: {question}")
        else:
            response = self.chat(f"Summarize this document: {document_text}")
        
        return {
            "document_content": document_text[:500] + "...",  # Preview
            "response": response
        }
    
    def multi_modal_chat(self, inputs, question=None):
        """Handle multiple input types in one conversation"""
        
        results = {}
        combined_context = []
        
        for input_type, input_path in inputs.items():
            if input_type == "image":
                result = self.process_image(input_path, question)
                results["image_analysis"] = result
                combined_context.append(f"Image analysis: {result}")
            
            elif input_type == "audio":
                result = self.process_audio(input_path)
                results["audio_analysis"] = result
                combined_context.append(f"Audio transcription: {result['transcription']}")
            
            elif input_type == "document":
                result = self.process_document(input_path, question)
                results["document_analysis"] = result
                combined_context.append(f"Document content: {result['document_content']}")
        
        # Synthesize all inputs
        if len(combined_context) > 1:
            synthesis_prompt = f"""
            Based on all the analyzed content:
            {chr(10).join(combined_context)}
            
            Question: {question or 'Provide a comprehensive analysis of all the provided content.'}
            """
            
            synthesis = self.chat(synthesis_prompt)
            results["synthesis"] = synthesis
        
        return results
    
    def extract_document_text(self, document_path):
        """Extract text from various document formats"""
        
        import os
        file_extension = os.path.splitext(document_path)[1].lower()
        
        try:
            if file_extension == '.txt':
                with open(document_path, 'r', encoding='utf-8') as file:
                    return file.read()
            
            elif file_extension == '.pdf':
                # Placeholder for PDF extraction
                return "PDF content extraction would be implemented here."
            
            elif file_extension in ['.docx', '.doc']:
                # Placeholder for Word document extraction
                return "Word document extraction would be implemented here."
            
            else:
                return f"Unsupported document type: {file_extension}"
        
        except Exception as e:
            return f"Error extracting document: {str(e)}"

# Usage
multimodal_assistant = MultiModalAssistant("YOUR_API_KEY", {
    "name": "MultiBot",
    "role": "multi-modal AI assistant",
    "personality": "analytical and comprehensive",
    "capabilities": ["text", "image analysis", "audio processing", "document analysis"],
    "model": "gpt-4o"
})

# Process single image
image_analysis = multimodal_assistant.process_image(
    "chart.jpg", 
    "What insights can you extract from this business chart?"
)
print(f"Image analysis: {image_analysis}")

# Process audio
audio_result = multimodal_assistant.process_audio("meeting.mp3")
print(f"Audio transcription: {audio_result['transcription']}")
print(f"Audio analysis: {audio_result['analysis']}")

# Multi-modal analysis
results = multimodal_assistant.multi_modal_chat({
    "image": "presentation_slide.jpg",
    "document": "report.pdf",
    "audio": "explanation.mp3"
}, "What are the key takeaways from this presentation?")

print(f"Synthesis: {results['synthesis']}")

Specialized Assistant Roles

Customer Support Assistant

def create_support_assistant(api_key, company_info):
    """Create a specialized customer support assistant"""
    
    config = {
        "name": "SupportAgent",
        "role": "customer support specialist",
        "personality": "patient, empathetic, solution-focused, and professional",
        "expertise": ["product knowledge", "troubleshooting", "policy information"],
        "response_style": "clear step-by-step guidance with empathetic communication",
        "model": "gpt-4o"
    }
    
    # Combine function calling with the knowledge base so company info can be searched
    class SupportAssistant(FunctionEnabledAssistant, KnowledgeAssistant):
        pass
    
    assistant = SupportAssistant(api_key, config)
    
    # Add company-specific knowledge
    assistant.add_knowledge_from_text("company_info", company_info)
    
    # Add support-specific functions
    assistant.add_custom_function(
        "create_ticket",
        lambda issue, priority: {"ticket_id": f"TICK-{hash(issue) % 10000}", "status": "created"},
        "Create a support ticket",
        {
            "type": "object",
            "properties": {
                "issue": {"type": "string", "description": "Issue description"},
                "priority": {"type": "string", "description": "Priority level"}
            },
            "required": ["issue", "priority"]
        }
    )
    
    return assistant
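
A minimal usage sketch; the company text and customer message below are invented for illustration.

# Example usage (illustrative data)
support = create_support_assistant(
    "YOUR_API_KEY",
    "Returns are accepted within 30 days of purchase with a valid receipt."
)

reply = support.chat_with_functions("I'd like to return an item I bought last week.")
print(f"Support: {reply}")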

Educational Tutor Assistant

def create_tutor_assistant(api_key, subject_area):
    """Create an educational tutor assistant"""
    
    config = {
        "name": f"{subject_area}Tutor",
        "role": f"expert {subject_area} tutor",
        "personality": "encouraging, patient, clear, and adaptive to learning styles",
        "expertise": [subject_area, "pedagogy", "learning techniques"],
        "response_style": "educational with examples, practice questions, and explanations",
        "model": "gpt-4o"
    }
    
    assistant = FunctionEnabledAssistant(api_key, config)  # needs function support for the quiz tool below
    
    # Add educational methods
    def create_practice_quiz(topic, difficulty):
        return {
            "quiz_questions": [
                f"Question about {topic} at {difficulty} level",
                "Sample multiple choice question",
                "Practice problem to solve"
            ],
            "note": "This would generate actual quiz questions"
        }
    
    assistant.add_custom_function(
        "create_practice_quiz",
        create_practice_quiz,
        "Create practice quiz questions",
        {
            "type": "object",
            "properties": {
                "topic": {"type": "string", "description": "Quiz topic"},
                "difficulty": {"type": "string", "description": "Difficulty level"}
            },
            "required": ["topic", "difficulty"]
        }
    )
    
    return assistant
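
Example usage for the tutor, with an invented subject and question.

# Example usage (illustrative)
math_tutor = create_tutor_assistant("YOUR_API_KEY", "Mathematics")

reply = math_tutor.chat_with_functions("Explain the chain rule and give me a practice quiz on derivatives.")
print(f"Tutor: {reply}")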

Business Analyst Assistant

def create_analyst_assistant(api_key):
    """Create a business analyst assistant"""
    
    config = {
        "name": "AnalystBot",
        "role": "senior business analyst",
        "personality": "analytical, data-driven, strategic, and thorough",
        "expertise": ["data analysis", "business strategy", "market research", "KPIs"],
        "response_style": "detailed analysis with data-backed recommendations",
        "model": "gpt-4o"
    }
    
    assistant = FunctionEnabledAssistant(api_key, config)
    
    # Add analysis functions
    def analyze_data(data_description, analysis_type):
        return {
            "analysis_type": analysis_type,
            "data_summary": f"Analysis of {data_description}",
            "insights": ["Key insight 1", "Key insight 2", "Key insight 3"],
            "recommendations": ["Recommendation 1", "Recommendation 2"],
            "note": "This would perform actual data analysis"
        }
    
    assistant.add_custom_function(
        "analyze_data",
        analyze_data,
        "Analyze business data",
        {
            "type": "object",
            "properties": {
                "data_description": {"type": "string", "description": "Description of data to analyze"},
                "analysis_type": {"type": "string", "description": "Type of analysis needed"}
            },
            "required": ["data_description", "analysis_type"]
        }
    )
    
    return assistant
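
Example usage for the analyst, with an invented request.

# Example usage (illustrative)
analyst = create_analyst_assistant("YOUR_API_KEY")

reply = analyst.chat_with_functions("Analyze last quarter's churn data and suggest retention KPIs to track.")
print(f"Analyst: {reply}")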

Best Practices

1. Assistant Design

  • Clear purpose: Define specific role and capabilities
  • Consistent personality: Maintain voice and tone throughout
  • Appropriate expertise: Match knowledge to use case
  • User-focused: Design for specific user needs and workflows

2. Knowledge Management

  • Structured data: Organize knowledge logically
  • Regular updates: Keep information current and accurate
  • Source tracking: Maintain clear source attribution
  • Quality control: Verify accuracy of knowledge base

3. Function Integration

  • Essential functions: Add only necessary capabilities
  • Error handling: Implement robust error management (see the sketch after this list)
  • Security: Validate inputs and protect sensitive operations
  • Documentation: Clear function descriptions and parameters
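
One way to harden function execution is to validate required parameters and catch failures before returning results to the model. The helper below is a sketch of that pattern around the available_functions registry; the name safe_function_call is ours, not part of the API.

def safe_function_call(assistant, function_name, function_args):
    """Validate and execute a registered function, returning an error payload instead of raising."""
    func_data = assistant.available_functions.get(function_name)
    if func_data is None:
        return {"error": f"Unknown function: {function_name}"}
    
    # Check required parameters before calling
    required = func_data["parameters"].get("required", [])
    missing = [p for p in required if p not in function_args]
    if missing:
        return {"error": f"Missing required parameters: {', '.join(missing)}"}
    
    try:
        return func_data["function"](**function_args)
    except Exception as e:
        # Surface the failure to the model instead of crashing the conversation
        return {"error": f"Function '{function_name}' failed: {str(e)}"}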

4. Conversation Flow

  • Context awareness: Maintain conversation context
  • Memory management: Handle long conversations efficiently (see the sketch after this list)
  • Response relevance: Ensure responses match user intent
  • Natural interaction: Create smooth, human-like exchanges
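
One simple approach to memory management, sketched below, is to cap the stored history and summarize older turns into persistent memory before dropping them. trim_history is a hypothetical helper built on the assistant's own chat and set_context_memory methods.

def trim_history(assistant, max_messages=20):
    """Keep the conversation within a fixed budget, summarizing older turns into memory."""
    history = assistant.conversation_history
    if len(history) <= max_messages:
        return
    
    older, recent = history[:-max_messages], history[-max_messages:]
    
    # Summarize the older turns with a fresh, history-free call
    transcript = "\n".join(f"{m['role']}: {m['content']}" for m in older)
    assistant.conversation_history = []
    summary = assistant.chat(f"Summarize this earlier conversation in 3 sentences:\n{transcript}")
    
    # Store the summary in persistent memory and keep only the recent turns
    assistant.set_context_memory("conversation_summary", summary)
    assistant.conversation_history = recent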

Common Use Cases

Customer Support

Help desk automation, FAQ assistance, ticket management

Personal Productivity

Task management, scheduling, information organization

Education & Training

Tutoring, skill development, knowledge assessment

Business Operations

Data analysis, reporting, workflow automation

Content Creation

Writing assistance, idea generation, content optimization

Research & Analysis

Information gathering, data interpretation, insights

Technical Support

Code assistance, troubleshooting, documentation

Creative Projects

Brainstorming, design feedback, creative direction

Model Recommendations

Assistant Type       Recommended Model    Reasoning
General Purpose      GPT-4o               Balanced capabilities, reliable
Technical/Code       Claude 3.5 Sonnet    Strong analytical abilities
Creative Tasks       GPT-4o               Good creativity and ideation
Data Analysis        Claude 3.5 Sonnet    Excellent reasoning capabilities
Customer Support     GPT-4o               Natural conversation flow
Educational          Claude 3.5 Sonnet    Clear explanations, patient tone

Getting Started