This module is for developers building Claude-powered applications in code. If you are a VS Code user using Claude Code interactively, everything you need is in Modules 07–18 and 20–24. Come back here when you need to automate Claude calls from Python or TypeScript.
Claude API — Developer Reference
The Anthropic API is RESTful with official Python and TypeScript SDKs. It supports streaming, tool use, vision, prompt caching, batch processing, and extended thinking.
Setup
# Python pip install anthropic python-dotenv # TypeScript / Node npm install @anthropic-ai/sdk # Store your key ā never commit this to git echo 'ANTHROPIC_API_KEY=sk-ant-api03-...' > .env
import anthropic
from dotenv import load_dotenv

# Pull ANTHROPIC_API_KEY from .env into the process environment.
load_dotenv()

client = anthropic.Anthropic()

prompt = "Write a 2-sentence description for the Sunset Gradient Tee."
message = client.messages.create(
    model="claude-sonnet-4-6",
    max_tokens=1024,
    system="You are a copywriter for ThreadCo, a sustainable UK T-shirt brand.",
    messages=[{"role": "user", "content": prompt}],
)
print(message.content[0].text)
Key API Parameters
| Parameter | Type | Description |
|---|---|---|
| model | string | Model ID — e.g. claude-sonnet-4-6, claude-haiku-4-5-20251001 |
| max_tokens | integer | Max output tokens (required). Controls cost and response length. |
| messages | array | Conversation history — alternating user/assistant turns |
| system | string | System prompt — sets role, tone, and standing constraints |
| temperature | float 0–1 | Randomness. 0 = deterministic, 1 = creative. Default: 1. |
| tools | array | Tool/function definitions for structured output and function calling |
| stream | boolean | Enable streaming — tokens arrive as they are generated |
Production Patterns
# shopmate/descriptions.py — generate ThreadCo product descriptions at scale
import anthropic

client = anthropic.Anthropic()

SYSTEM = """You are a copywriter for ThreadCo, a sustainable UK T-shirt brand.
Voice: friendly, direct, never corporate. Always mention the sustainable material.
Forbidden words: vibrant, perfect, stylish, must-have, luxurious. No exclamation marks."""


def write_description(name: str, material: str, price: float, colours: list[str]) -> str:
    """Generate a two-sentence product description for one ThreadCo item.

    Args:
        name: Product name, e.g. "Sunset Gradient Tee".
        material: Fabric description, e.g. "organic cotton".
        price: Price in GBP.
        colours: Available colour names.

    Returns:
        The generated description text.
    """
    resp = client.messages.create(
        model="claude-haiku-4-5-20251001",  # Haiku: fast + cheap for high-volume copy
        max_tokens=150,
        system=SYSTEM,
        messages=[{
            "role": "user",
            # Fixed mojibake: the pound sign was corrupted to "Ā£" in the prompt.
            "content": f"Write a 2-sentence description for: {name}\n"
                       f"Material: {material} | Price: £{price} | Colours: {', '.join(colours)}",
        }],
    )
    return resp.content[0].text
# shopmate/api/resilient_client.py — handles traffic spikes gracefully
import anthropic, time, random
from anthropic import RateLimitError

# Disable the SDK's built-in retries so the backoff below is the only retry layer.
client = anthropic.Anthropic(max_retries=0)


def create_with_retry(max_retries: int = 4, **kwargs):
    """Call messages.create with exponential backoff — essential during flash sales or load spikes.

    Args:
        max_retries: Number of attempts before giving up.
        **kwargs: Passed straight through to client.messages.create.

    Raises:
        RuntimeError: When every attempt hit a rate limit.
    """
    for attempt in range(max_retries):
        try:
            return client.messages.create(**kwargs)
        except RateLimitError:
            # Jitter (random.random()) spreads retries so clients don't stampede.
            wait = (2 ** attempt) + random.random()
            time.sleep(wait)
    raise RuntimeError("Max retries exceeded")


# Streaming — customers see response forming in real time
def stream_reply(messages: list, system: str) -> str:
    """Stream a reply token-by-token, echoing to stdout; returns the full text."""
    full = ""
    with client.messages.stream(
        model="claude-haiku-4-5-20251001",
        max_tokens=250,
        system=system,
        messages=messages,
    ) as stream:
        # BUG FIX: text_stream is an iterator attribute on the SDK's stream
        # manager, not a method — calling it as stream.text_stream() raised
        # TypeError before any token was printed.
        for text in stream.text_stream:
            print(text, end="", flush=True)
            full += text
    return full
# Process 2,000 product descriptions overnight — half the cost of synchronous calls
def batch_descriptions(products: list[dict]) -> str:
    """Submit one Batches API job covering every product; returns the batch ID.

    Args:
        products: Dicts with at least "id", "name", and "material" keys.

    Returns:
        The batch ID, used later to poll for and fetch results.
    """
    batch = client.messages.batches.create(requests=[
        {
            "custom_id": p["id"],  # lets us match each result back to its product
            "params": {
                "model": "claude-haiku-4-5-20251001",
                "max_tokens": 150,
                "system": SYSTEM,
                "messages": [{"role": "user", "content": f"Describe: {p['name']}, {p['material']}"}],
            },
        }
        for p in products
    ])
    # Fixed mojibake in the status message ("ā" → "—"). NOTE(review): batch
    # turnaround varies with queue depth — "~5 minutes" is optimistic; confirm.
    print(f"Batch {batch.id} submitted — results ready in ~5 minutes")
    return batch.id
# Few-shot: provide examples for consistent output style across 2,000 SKUs
# Fixed mojibake in this runtime prompt string ("ā" → "—").
FEW_SHOT_EXAMPLES = """
EXAMPLE 1 — Midnight Pocket Tee:
"Soft, weighty organic cotton with a chest pocket that is actually useful. Made from GOTS-certified fabric in a relaxed unisex cut that goes with everything."

EXAMPLE 2 — Wave Print Crop Tee:
"A hand-screen-printed wave ripples across the chest in water-based inks that last. Cut just above the hip in breathable organic cotton, sized XS to 3XL."
"""


def describe_with_examples(product: dict) -> str:
    """Generate a description steered by two few-shot style examples.

    Args:
        product: Dict with "name", "material", "key_feature", and "colours" keys.

    Returns:
        The generated description text.
    """
    user = f"""Here are two ThreadCo descriptions that worked well:
{FEW_SHOT_EXAMPLES}
Write a description for this product in the same style:
Name: {product['name']} | Material: {product['material']}
Key feature: {product['key_feature']} | Colours: {', '.join(product['colours'])}
Two sentences. Same structure. No exclamation marks."""
    resp = client.messages.create(
        model="claude-haiku-4-5-20251001",
        max_tokens=120,
        messages=[{"role": "user", "content": user}],
    )
    return resp.content[0].text
# shopmate/api/main.py — expose Claude-powered endpoints over HTTP
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from shopmate.descriptions import write_description

app = FastAPI(title="ShopMate API")


class ProductIn(BaseModel):
    # Request schema for POST /describe.
    name: str
    material: str
    price: float
    colours: list[str]


@app.post("/describe")
async def describe(product: ProductIn):
    """Generate a product description for a ThreadCo item."""
    description = write_description(
        product.name,
        product.material,
        product.price,
        product.colours,
    )
    return {"description": description}

# Run:  uvicorn shopmate.api.main:app --reload --port 8000
# Test: curl -X POST http://localhost:8000/describe \
#         -H "Content-Type: application/json" \
#         -d '{"name":"Sunset Tee","material":"organic cotton","price":29.99,"colours":["orange"]}'
TypeScript SDK
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

const message = await client.messages.create({
  model: 'claude-sonnet-4-6',
  max_tokens: 1024,
  system: 'You are a ThreadCo copywriter.',
  messages: [{ role: 'user', content: 'Write a description for the Sunset Tee.' }],
});

// BUG FIX: `content` is a union of block types — ToolUseBlock has no `.text`,
// so `message.content[0].text` fails to compile under strict TypeScript.
// Narrow to a text block before reading `.text`.
const first = message.content[0];
if (first.type === 'text') {
  console.log(first.text);
}
claude-haiku-4-5-20251001 for high-volume tasks (descriptions, summaries, classification). claude-sonnet-4-6 for standard production use. claude-opus-4-6 for complex reasoning and agentic pipelines where quality justifies cost.
Complete Running Example
A full customer support chat application — Python backend with streaming, TypeScript CLI client, and a tool call that looks up real order data. Clone it, set your API key, and it runs immediately.
What It Builds
A ShopMate support bot that answers customer questions, looks up orders by ID using a tool call, and streams responses token-by-token so the UI feels instant.
What It Covers
System prompts, multi-turn conversation history, tool use (function calling), streaming, and a FastAPI HTTP wrapper — every production pattern in one app.
Run in 3 Commands
pip install anthropic fastapi uvicorn → set ANTHROPIC_API_KEY → uvicorn app:app --reload. No database, no auth, no boilerplate to strip out.
Project Structure
shopmate-support/
├── app.py      # FastAPI app — HTTP endpoints
├── bot.py      # Claude client — streaming + tool use
├── orders.py   # Fake order database (replace with real DB)
├── .env        # ANTHROPIC_API_KEY=sk-ant-...
└── client.ts   # TypeScript CLI to test the bot interactively
Step 1 ā The Order Lookup Tool
Define a tool that Claude can call when a customer asks about their order. Claude decides when to call it — you just define the schema and implement the function.
# orders.py — simulated order database. Replace with real DB queries in production.
ORDERS = {
    "ORD-1001": {"status": "shipped", "item": "Sunset Gradient Tee (M, Orange)", "eta": "2 Apr", "tracking": "RM493827651GB"},
    "ORD-1002": {"status": "processing", "item": "Wave Print Crop Tee (S, Slate)", "eta": "4 Apr", "tracking": None},
    "ORD-1003": {"status": "delivered", "item": "Midnight Pocket Tee (L, Black)", "eta": None, "tracking": "RM381726495GB"},
}


def get_order(order_id: str) -> dict:
    """Look up an order. Returns status dict or an error message.

    Lookup is case-insensitive; the caller's original order_id spelling is
    echoed back in the result. ETA and tracking fields are only included when
    the stored record has them.
    """
    record = ORDERS.get(order_id.upper())
    if record is None:
        return {"error": f"Order {order_id} not found. Ask the customer to double-check their confirmation email."}

    summary = {
        "order_id": order_id,
        "status": record["status"],
        "item": record["item"],
    }
    eta = record["eta"]
    if eta:
        summary["estimated_delivery"] = eta
    tracking = record["tracking"]
    if tracking:
        summary["tracking_number"] = tracking
        summary["track_url"] = f"https://track.royalmail.com/tracking/{tracking}"
    return summary
Step 2 ā The Bot: Streaming + Tool Use
The core Claude client. It maintains conversation history, handles tool calls automatically, and streams the final response so users see tokens as they arrive.
# bot.py — ShopMate support bot: multi-turn, streaming, tool use
import anthropic, json
from orders import get_order

client = anthropic.Anthropic()

# Mojibake fixed in this runtime prompt string ("ā" → "—").
SYSTEM = """You are ShopMate, the customer support assistant for ThreadCo — a sustainable UK T-shirt brand.
Be concise, warm, and helpful.
When a customer mentions an order number (format: ORD-XXXX), use the get_order tool
to look it up before responding about its status. Never guess order status — always look it up.
If you cannot resolve an issue, escalate: "I'll pass this to our team at hello@threadco.com — you'll hear back within 1 business day."
"""

# Tool schema — tells Claude what function it can call and what arguments it takes
TOOLS = [{
    "name": "get_order",
    "description": "Look up a ThreadCo order by order ID. Returns status, item, ETA, and tracking info.",
    "input_schema": {
        "type": "object",
        "properties": {
            "order_id": {"type": "string", "description": "The order ID, e.g. ORD-1001"}
        },
        "required": ["order_id"]
    }
}]


def chat(history: list[dict], user_message: str) -> tuple[str, list[dict]]:
    """
    Send a message and get a response, handling any tool calls automatically.
    Returns (assistant_reply, updated_history).
    history: list of {"role": "user"/"assistant", "content": ...} dicts
    """
    history = history + [{"role": "user", "content": user_message}]
    while True:
        response = client.messages.create(
            model="claude-sonnet-4-6",
            max_tokens=512,
            system=SYSTEM,
            tools=TOOLS,
            messages=history,
        )
        # Tool call — Claude wants to look up an order
        if response.stop_reason == "tool_use":
            tool_block = next(b for b in response.content if b.type == "tool_use")
            tool_result = get_order(tool_block.input["order_id"])
            # BUG FIX: get_order returns {"error": ...} with no "status" key for
            # unknown orders — tool_result['status'] raised KeyError. Use .get().
            print(f" [tool call: get_order({tool_block.input['order_id']}) → "
                  f"{tool_result.get('status', 'error')}]")
            # Append Claude's tool-use message + our tool result, then loop
            history = history + [
                {"role": "assistant", "content": response.content},
                {"role": "user", "content": [{
                    "type": "tool_result",
                    "tool_use_id": tool_block.id,
                    "content": json.dumps(tool_result),
                }]},
            ]
            continue  # Give Claude the result so it can write the final reply
        # Final text response — take the first text block rather than assuming
        # content[0] is text (other block types can appear first).
        reply = next(b.text for b in response.content if b.type == "text")
        history = history + [{"role": "assistant", "content": reply}]
        return reply, history


def stream_chat(history: list[dict], user_message: str):
    """
    Streaming version — yields text tokens as they arrive.
    Handles tool calls internally, then streams the final reply.
    Yields: text chunks (str) and the final history (list) as the last item.
    """
    # Handle tool calls without streaming (tool round-trips are fast)
    history = history + [{"role": "user", "content": user_message}]
    response = client.messages.create(
        model="claude-sonnet-4-6",
        max_tokens=512,
        system=SYSTEM,
        tools=TOOLS,
        messages=history,
    )
    while response.stop_reason == "tool_use":
        tool_block = next(b for b in response.content if b.type == "tool_use")
        tool_result = get_order(tool_block.input["order_id"])
        history += [
            {"role": "assistant", "content": response.content},
            {"role": "user", "content": [{
                "type": "tool_result",
                "tool_use_id": tool_block.id,
                "content": json.dumps(tool_result),
            }]},
        ]
        response = client.messages.create(
            model="claude-sonnet-4-6",
            max_tokens=512,
            system=SYSTEM,
            tools=TOOLS,
            messages=history,
        )

    # Stream the final text reply
    full = ""
    with client.messages.stream(
        model="claude-sonnet-4-6",
        max_tokens=512,
        system=SYSTEM,
        # BUG FIX: after a tool round-trip, `history` contains tool_use /
        # tool_result blocks; the API rejects such messages unless the tool
        # definitions are supplied, so `tools` must be passed here too.
        tools=TOOLS,
        messages=history,
    ) as s:
        # BUG FIX: text_stream is an iterator attribute, not a method —
        # calling it as s.text_stream() raised TypeError.
        for text in s.text_stream:
            full += text
            yield text
    history += [{"role": "assistant", "content": full}]
    yield history  # last yield is the updated history list
Step 3 ā FastAPI HTTP Server
Wrap the bot in a FastAPI server. The /chat endpoint returns a full response; /chat/stream returns a streaming SSE response for real-time UIs.
# app.py — FastAPI server wrapping ShopMate bot
# Run: uvicorn app:app --reload
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from dotenv import load_dotenv
from bot import chat, stream_chat

load_dotenv()
app = FastAPI(title="ShopMate Support API")


class ChatRequest(BaseModel):
    message: str
    history: list[dict] = []  # Previous turns — client maintains this


class ChatResponse(BaseModel):
    reply: str
    history: list[dict]  # Send back so client can include in next request


# --- Standard (non-streaming) endpoint ---
@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(req: ChatRequest):
    """Return the bot's full reply plus the updated conversation history."""
    reply, updated_history = chat(req.history, req.message)
    return ChatResponse(reply=reply, history=updated_history)


# --- Streaming endpoint — tokens arrive as server-sent events ---
@app.post("/chat/stream")
async def chat_stream(req: ChatRequest):
    """Stream the bot's reply as SSE `data:` events."""
    async def generate():
        # NOTE(review): stream_chat is a synchronous generator, so each chunk
        # blocks the event loop — acceptable for a demo; confirm before prod.
        for chunk in stream_chat(req.history, req.message):
            if isinstance(chunk, str):  # final yield is the history list — skip it
                # BUG FIX: SSE requires each line of a multi-line payload to be
                # prefixed with "data: "; a raw newline inside a chunk previously
                # terminated the event early and corrupted the stream.
                yield "data: " + chunk.replace("\n", "\ndata: ") + "\n\n"
    return StreamingResponse(generate(), media_type="text/event-stream")

# Test with curl:
#   curl -X POST http://localhost:8000/chat \
#     -H "Content-Type: application/json" \
#     -d '{"message": "Where is my order ORD-1001?", "history": []}'
Step 4 ā TypeScript CLI Client
A Node.js CLI that talks directly to the Anthropic SDK — same bot logic in TypeScript, with readline for interactive conversation in the terminal.
// client.ts ā interactive ShopMate terminal bot // Run: npx ts-node client.ts // Needs: npm install @anthropic-ai/sdk dotenv readline import Anthropic from '@anthropic-ai/sdk'; import * as readline from 'readline'; import 'dotenv/config'; const client = new Anthropic(); const SYSTEM = `You are ShopMate, customer support for ThreadCo. When a customer gives an order number (ORD-XXXX), use get_order to look it up. Never guess order status.`; const TOOLS: Anthropic.Tool[] = [{ name: 'get_order', description: 'Look up a ThreadCo order by ID.', input_schema: { type: 'object', properties: { order_id: { type: 'string', description: 'Order ID e.g. ORD-1001' } }, required: ['order_id'] } }]; // Simulated order lookup (same data as orders.py) const ORDERS: Record<string, object> = { 'ORD-1001': { status: 'shipped', item: 'Sunset Gradient Tee', eta: '2 Apr', tracking: 'RM493827651GB' }, 'ORD-1002': { status: 'processing', item: 'Wave Print Crop Tee', eta: '4 Apr', tracking: null }, 'ORD-1003': { status: 'delivered', item: 'Midnight Pocket Tee', eta: null, tracking: 'RM381726495GB' }, }; function getOrder(id: string): object { return ORDERS[id.toUpperCase()] ?? 
{ error: `Order ${id} not found.` }; } type Message = { role: 'user' | 'assistant'; content: string | Anthropic.ContentBlock[] }; async function sendMessage(history: Message[], userText: string): Promise<[string, Message[]]> { history = [...history, { role: 'user', content: userText }]; while (true) { const response = await client.messages.create({ model: 'claude-sonnet-4-6', max_tokens: 512, system: SYSTEM, tools: TOOLS, messages: history, }); if (response.stop_reason === 'tool_use') { const toolBlock = response.content.find(b => b.type === 'tool_use') as Anthropic.ToolUseBlock; const result = getOrder((toolBlock.input as { order_id: string }).order_id); console.log(` [tool: get_order ā ${JSON.stringify(result)}]`); history = [ ...history, { role: 'assistant', content: response.content }, { role: 'user', content: [{ type: 'tool_result', tool_use_id: toolBlock.id, content: JSON.stringify(result) }] as any } ]; continue; } const reply = (response.content[0] as Anthropic.TextBlock).text; history = [...history, { role: 'assistant', content: reply }]; return [reply, history]; } } // --- Interactive CLI loop --- async function main() { const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); let history: Message[] = []; console.log('ShopMate Support ā type your question, Ctrl+C to exit\n'); const ask = () => rl.question('You: ', async (input) => { if (!input.trim()) { ask(); return; } const [reply, updated] = await sendMessage(history, input); history = updated; console.log(`\nShopMate: ${reply}\n`); ask(); }); ask(); } main();
Running It
Install dependencies
pip install anthropic fastapi uvicorn python-dotenv
Set your API key
echo 'ANTHROPIC_API_KEY=sk-ant-api03-...' > .env
Start the server
uvicorn app:app --reload  # → http://localhost:8000/docs (auto-generated API docs)
Test with curl
curl -X POST http://localhost:8000/chat \
-H "Content-Type: application/json" \
  -d '{"message": "Where is my order ORD-1001?", "history": []}'

Or run the TypeScript CLI
npm install @anthropic-ai/sdk dotenv npx ts-node client.ts
Sample Conversation
This example covers the core patterns. To take it to production: replace orders.py with a real database query, add authentication to the FastAPI endpoints, connect the streaming endpoint to a React frontend using the EventSource API, and deploy the bot behind an API gateway with rate limiting per user.