AI API Integration Guide: OpenAI, Claude, Gemini

Learn how to connect to and use the AI APIs from OpenAI, Anthropic Claude, and Google Gemini, covering best practices, error handling, and a comparison of each provider.

AI Unlocked Team
29/01/2025

AI API Integration Guide: OpenAI, Claude, Gemini

Integrating AI APIs is a key skill for developers who want to build AI-powered applications. This article walks you through using the AI APIs from the three major providers, along with the best practices you should know.

Overview of AI API Providers

Comparing AI Providers

Feature         | OpenAI    | Claude            | Gemini
----------------|-----------|-------------------|---------------
Company         | OpenAI    | Anthropic         | Google
Best Model      | GPT-4o    | Claude 3.5 Sonnet | Gemini 1.5 Pro
Context Window  | 128K      | 200K              | 2M
Vision          | Yes       | Yes               | Yes
Code Generation | Excellent | Excellent         | Good
Pricing         | Medium    | Medium            | Low
Rate Limits     | Flexible  | Moderate          | Generous

How Do You Choose an AI Provider?

Decision Tree:

Need a very large context window?
├─ Yes → Gemini (2M tokens)
└─ No →
    Need strong safety guarantees?
    ├─ Yes → Claude (Constitutional AI)
    └─ No →
        Need the biggest ecosystem?
        ├─ Yes → OpenAI (GPT-4)
        └─ No → Choose based on price/performance
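
The same branching can be sketched in code; a minimal illustration (the function name and flags are hypothetical, not part of any SDK):

def choose_provider(needs_huge_context: bool,
                    needs_high_safety: bool,
                    wants_big_ecosystem: bool) -> str:
    """Hypothetical helper encoding the decision tree above."""
    if needs_huge_context:
        return "gemini"   # 2M-token context window
    if needs_high_safety:
        return "claude"   # Constitutional AI
    if wants_big_ecosystem:
        return "openai"   # largest tooling and community
    return "compare price/performance"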

OpenAI API

Setup and Authentication

# Install the SDK
npm install openai
# or
pip install openai

// JavaScript/TypeScript
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

# Python
import os

from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

Chat Completions API

# Basic chat completion
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"}
    ],
    temperature=0.7,
    max_tokens=1000
)

print(response.choices[0].message.content)

Streaming Responses

# Streaming for real-time output
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a story about AI"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")

// JavaScript streaming
const stream = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Write a poem' }],
  stream: true,
});

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content || '';
  process.stdout.write(content);
}

Function Calling

import json

# Define functions
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }
    }
]

# Call with tools
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Bangkok?"}],
    tools=tools,
    tool_choice="auto"
)

# Handle tool call
if response.choices[0].message.tool_calls:
    tool_call = response.choices[0].message.tool_calls[0]
    function_name = tool_call.function.name
    arguments = json.loads(tool_call.function.arguments)

    # Execute function
    if function_name == "get_weather":
        result = get_weather(**arguments)

    # Send result back
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": "What's the weather in Bangkok?"},
            response.choices[0].message,
            {
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result)
            }
        ]
    )
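
The get_weather function called above is assumed to live in your own code; a minimal placeholder for testing the round trip might look like this (the returned values are made up):

def get_weather(location: str, unit: str = "celsius") -> dict:
    """Placeholder implementation; swap in a real weather API."""
    return {"location": location, "temperature": 32, "unit": unit}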

Vision API

# Analyze images
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://example.com/image.jpg",
                        # Or base64: "data:image/jpeg;base64,..."
                    }
                }
            ]
        }
    ]
)

Embeddings API

# Generate embeddings
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="Hello, world!"
)

embedding = response.data[0].embedding
print(f"Embedding dimension: {len(embedding)}")  # 1536

Anthropic Claude API

Setup and Authentication

pip install anthropic
# or
npm install @anthropic-ai/sdk

# Python
import os

from anthropic import Anthropic

client = Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])

// JavaScript
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});

Messages API

# Basic message
message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "Explain quantum computing in simple terms"}
    ]
)

print(message.content[0].text)

System Prompt

# With system prompt
message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    system="You are a helpful Thai language tutor. Always respond in Thai.",
    messages=[
        {"role": "user", "content": "Teach me basic greetings"}
    ]
)

Streaming

# Streaming with Claude
with client.messages.stream(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Write a story"}]
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)

Vision with Claude

import base64

# Read image
with open("image.jpg", "rb") as f:
    image_data = base64.standard_b64encode(f.read()).decode("utf-8")

message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/jpeg",
                        "data": image_data
                    }
                },
                {
                    "type": "text",
                    "text": "Describe this image"
                }
            ]
        }
    ]
)

Tool Use (Function Calling)

import json

# Define tools for Claude
tools = [
    {
        "name": "get_stock_price",
        "description": "Get current stock price",
        "input_schema": {
            "type": "object",
            "properties": {
                "symbol": {
                    "type": "string",
                    "description": "Stock symbol (e.g., AAPL)"
                }
            },
            "required": ["symbol"]
        }
    }
]

message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1024,
    tools=tools,
    messages=[{"role": "user", "content": "What's Apple's stock price?"}]
)

# Handle tool use
if message.stop_reason == "tool_use":
    tool_use = next(
        block for block in message.content
        if block.type == "tool_use"
    )

    # Execute the tool (get_stock_price is your own function,
    # like get_weather in the OpenAI example)
    result = get_stock_price(tool_use.input["symbol"])

    # Continue conversation
    response = client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        tools=tools,
        messages=[
            {"role": "user", "content": "What's Apple's stock price?"},
            {"role": "assistant", "content": message.content},
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use.id,
                        "content": json.dumps(result)
                    }
                ]
            }
        ]
    )

Google Gemini API

Setup and Authentication

pip install google-generativeai
import os

import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

Text Generation

# Basic generation
model = genai.GenerativeModel("gemini-1.5-pro")

response = model.generate_content("Explain how AI works")
print(response.text)

Chat Conversation

# Multi-turn conversation
model = genai.GenerativeModel("gemini-1.5-pro")
chat = model.start_chat(history=[])

response = chat.send_message("Hello, I want to learn about Python")
print(response.text)

response = chat.send_message("What are the basic data types?")
print(response.text)

Streaming

# Streaming response
model = genai.GenerativeModel("gemini-1.5-pro")

response = model.generate_content(
    "Write a long story about space exploration",
    stream=True
)

for chunk in response:
    print(chunk.text, end="")

Vision with Gemini

import PIL.Image

# Load image
image = PIL.Image.open("photo.jpg")

model = genai.GenerativeModel("gemini-1.5-pro")

response = model.generate_content([
    "Describe what you see in this image",
    image
])

print(response.text)

Long Context (2M tokens)

# Gemini's killer feature: 2M context window
model = genai.GenerativeModel("gemini-1.5-pro")

# Upload large file
file = genai.upload_file("large_document.pdf")

response = model.generate_content([
    "Summarize this document",
    file
])

Best Practices

1. Error Handling

import time
from openai import RateLimitError, APIError

def call_with_retry(func, max_retries=3, base_delay=1):
    """Call API with exponential backoff"""
    for attempt in range(max_retries):
        try:
            return func()
        except RateLimitError:
            if attempt == max_retries - 1:
                raise
            delay = base_delay * (2 ** attempt)
            print(f"Rate limited. Retrying in {delay}s...")
            time.sleep(delay)
        except APIError as e:
            print(f"API Error: {e}")
            raise

# Usage
result = call_with_retry(
    lambda: client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}]
    )
)

2. Token Management

import tiktoken

def count_tokens(text: str, model: str = "gpt-4o") -> int:
    """Count tokens in text"""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))

def truncate_to_tokens(text: str, max_tokens: int, model: str = "gpt-4o") -> str:
    """Truncate text to fit within token limit"""
    encoding = tiktoken.encoding_for_model(model)
    tokens = encoding.encode(text)

    if len(tokens) <= max_tokens:
        return text

    return encoding.decode(tokens[:max_tokens])

# Usage
text = "Very long text..."
token_count = count_tokens(text)
print(f"Token count: {token_count}")

if token_count > 4000:
    text = truncate_to_tokens(text, 4000)

3. Caching

import hashlib
import json

import redis

# Shared cache for production use (assumes a reachable Redis instance);
# for a simple in-process cache, functools.lru_cache also works
redis_client = redis.Redis()

def get_completion_with_cache(messages: list, model: str = "gpt-4o"):
    """Get a completion, reusing cached results for identical requests"""
    # Create a stable hash of the request
    messages_str = json.dumps(messages, sort_keys=True)
    messages_hash = hashlib.md5(messages_str.encode()).hexdigest()
    cache_key = f"{model}:{messages_hash}"

    # Check the cache first
    cached = redis_client.get(cache_key)
    if cached:
        return json.loads(cached)

    # Cache miss: call the API
    response = client.chat.completions.create(
        model=model,
        messages=messages
    )
    result = response.choices[0].message.content

    # Cache the result
    redis_client.setex(cache_key, 3600, json.dumps(result))  # 1 hour TTL

    return result

4. Cost Optimization

from datetime import datetime

class CostTracker:
    """Track API costs"""

    PRICING = {
        "gpt-4o": {"input": 0.005, "output": 0.015},  # USD per 1K tokens
        "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
        "claude-3-5-sonnet-20241022": {"input": 0.003, "output": 0.015},
        "gemini-1.5-pro": {"input": 0.00125, "output": 0.005}
    }

    def __init__(self):
        self.total_cost = 0
        self.usage_log = []

    def log_usage(self, model: str, input_tokens: int, output_tokens: int):
        pricing = self.PRICING.get(model, {"input": 0, "output": 0})
        cost = (
            (input_tokens / 1000) * pricing["input"] +
            (output_tokens / 1000) * pricing["output"]
        )

        self.total_cost += cost
        self.usage_log.append({
            "model": model,
            "input_tokens": input_tokens,
            "output_tokens": output_tokens,
            "cost": cost,
            "timestamp": datetime.now().isoformat()
        })

        return cost

    def get_report(self) -> dict:
        return {
            "total_cost": self.total_cost,
            "total_requests": len(self.usage_log),
            "by_model": self._group_by_model()
        }

    def _group_by_model(self) -> dict:
        """Aggregate request counts and cost per model"""
        by_model = {}
        for entry in self.usage_log:
            stats = by_model.setdefault(entry["model"], {"requests": 0, "cost": 0.0})
            stats["requests"] += 1
            stats["cost"] += entry["cost"]
        return by_model

# Usage
tracker = CostTracker()

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}]
)

cost = tracker.log_usage(
    "gpt-4o",
    response.usage.prompt_tokens,
    response.usage.completion_tokens
)
print(f"This request cost: ${cost:.4f}")

5. Multi-Provider Fallback

from anthropic import AsyncAnthropic
from openai import AsyncOpenAI

class AIClient:
    """Multi-provider AI client with fallback"""

    def __init__(self):
        self.openai = AsyncOpenAI()
        self.anthropic = AsyncAnthropic()
        self.providers = ["openai", "anthropic"]

    async def chat(
        self,
        messages: list,
        preferred_provider: str = "openai"
    ) -> str:
        # Try the preferred provider first, then fall back to the others
        providers = [preferred_provider] + [
            p for p in self.providers if p != preferred_provider
        ]

        for provider in providers:
            try:
                if provider == "openai":
                    return await self._openai_chat(messages)
                elif provider == "anthropic":
                    return await self._anthropic_chat(messages)
            except Exception as e:
                print(f"{provider} failed: {e}")
                continue

        raise Exception("All providers failed")

    async def _openai_chat(self, messages: list) -> str:
        response = await self.openai.chat.completions.create(
            model="gpt-4o",
            messages=messages
        )
        return response.choices[0].message.content

    async def _anthropic_chat(self, messages: list) -> str:
        # Claude takes the system prompt as a separate parameter,
        # so split it out of the OpenAI-style message list
        anthropic_messages = [
            {"role": m["role"], "content": m["content"]}
            for m in messages if m["role"] != "system"
        ]
        system = next(
            (m["content"] for m in messages if m["role"] == "system"),
            None
        )

        response = await self.anthropic.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1024,
            system=system or "",
            messages=anthropic_messages
        )
        return response.content[0].text
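
A usage sketch for the fallback client (assuming an async entry point):

import asyncio

async def main():
    ai = AIClient()
    answer = await ai.chat(
        [{"role": "user", "content": "Hello"}],
        preferred_provider="anthropic",
    )
    print(answer)

asyncio.run(main())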

Security Best Practices

API Key Management

# Never hardcode API keys!
import os

# Bad
api_key = "sk-xxx..."  # Don't do this!

# Good - use environment variables
api_key = os.environ["OPENAI_API_KEY"]

# Better - use secret manager
from google.cloud import secretmanager

def get_secret(secret_id: str) -> str:
    client = secretmanager.SecretManagerServiceClient()
    name = f"projects/my-project/secrets/{secret_id}/versions/latest"
    response = client.access_secret_version(request={"name": name})
    return response.payload.data.decode("UTF-8")

api_key = get_secret("openai-api-key")

Input Validation

def sanitize_user_input(text: str) -> str:
    """Sanitize user input before sending to AI"""
    # Strip an obvious injection phrase (naive; real filtering needs
    # a more robust approach such as an allowlist or a moderation layer)
    text = text.replace("IGNORE ALL PREVIOUS INSTRUCTIONS", "")

    # Limit length
    max_length = 10000
    if len(text) > max_length:
        text = text[:max_length]

    return text.strip()

# Usage (assuming a web-framework request object)
user_input = sanitize_user_input(request.message)

Summary

Choosing the right AI API comes down to your use case, budget, and performance requirements. Understanding these best practices will help you build AI applications that are efficient, secure, and cost-effective.




Ready to start using AI APIs?

Contact the AI Unlocked team for consulting and for developing AI applications that fit your business needs.


Written by

AI Unlocked Team