Diagnose and fix SerpApi errors: invalid keys, exhausted credits, blocked searches.
SerpApi Common Errors
Overview
Quick reference for SerpApi errors. Check search_metadata.status first -- it will be Success or Error. Error details are in search_metadata.error or error at the top level.
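For example, a minimal pre-flight check before consuming any response (a sketch, using the Python client shown throughout this pack):
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(engine="google", q="test")
# Status is "Success" or "Error"; details live in search_metadata.error or top-level "error"
if result["search_metadata"]["status"] == "Error":
    raise RuntimeError(result["search_metadata"].get("error") or result.get("error"))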
Error Reference
Invalid API Key
{ "error": "Invalid API key. Your API key should be here: https://serpapi.com/manage-api-key" }
Fix: Verify key at serpapi.com/manage-api-key. Check env var is loaded.
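A quick way to confirm the env var is actually visible to your process (sketch; prints only a prefix so the key is not leaked into logs):
import os
key = os.environ.get("SERPAPI_API_KEY", "")
print("SERPAPI_API_KEY set:", bool(key), "| prefix:", key[:6])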
Account Disabled / Searches Exhausted
{ "error": "Your searches for the month have run out. You can upgrade your plan at https://serpapi.com/pricing" }
Fix: Check usage: curl "https://serpapi.com/account.json?api_key=$SERPAPI_API_KEY". Upgrade plan or wait for monthly reset.
Missing Required Parameter
{ "error": "Missing parameter: q. Please provide a search query." }
Fix: Each engine has different query params. Google/Bing use q, YouTube uses search_query.
Google CAPTCHA / Blocked
{ "search_metadata": { "status": "Error" }, "error": "Google hasn't returned any results for this query." }
Fix: SerpApi handles CAPTCHAs automatically, but unusual queries or very high volume may trigger blocks. Try different location or wait.
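If blocks are intermittent, a hedged retry sketch -- the locations and backoff values are illustrative, and note that each retry spends another credit:
import time
def search_with_fallback(client, q, locations=("Austin, Texas", "New York, New York")):
    """Retry a blocked query with a different location and simple backoff."""
    for attempt, loc in enumerate(locations):
        result = client.search(engine="google", q=q, location=loc)
        if result["search_metadata"]["status"] == "Success" and result.get("organic_results"):
            return result
        time.sleep(2 ** attempt)  # 1s, then 2s, ...
    return result  # caller inspects the error field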
Empty Organic Results (Not an Error)
result = client.search(engine="google", q="xyzzy123nonexistent")
if not result.get("organic_results"):
# Not an error -- query just has no results
# Check for answer_box, knowledge_graph, etc.
print("No organic results, checking other components...")
print(f"Answer box: {result.get('answer_box')}")
print(f"Related searches: {result.get('related_searches')}")
Quick Diagnostic
# 1. Check API key and account status
curl -s "https://serpapi.com/account.json?api_key=$SERPAPI_API_KEY" | jq '{
plan: .plan_name, used: .this_month_usage, remaining: .plan_searches_left
}'
# 2. Test basic search
curl -s "https://serpapi.com/search.json?q=test&engine=google&api_key=$SERPAPI_API_KEY" \
| jq '.search_metadata.status'
# 3. Check search archive (3 most recent searches)
curl -s "https://serpapi.com/searches.json?api_key=$SERPAPI_API_KEY" \
| jq '.[0:3] | .[] | {id: .id, status: .status, query: .search_parameters.q}'
Google Search scraping with SerpApi -- organic results, knowledge graph, answer boxes.
SerpApi Core Workflow A: Google Search
Overview
Extract structured data from Google Search: organic results, answer boxes, knowledge graph, related questions (PAA), local pack, ads, and shopping results. Each search costs 1 API credit.
Instructions
Step 1: Full Google Search with All Components
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(
engine="google",
q="best project management tools",
location="New York, New York",
hl="en", gl="us",
num=10,
)
# 1. Organic Results
for r in result.get("organic_results", []):
print(f"{r['position']}. {r['title']}")
print(f" URL: {r['link']}")
print(f" Snippet: {r.get('snippet', 'N/A')}")
# Rich snippets: sitelinks, rating, date
if "rich_snippet" in r:
print(f" Rating: {r['rich_snippet'].get('top', {}).get('rating')}")
# 2. Answer Box
if ab := result.get("answer_box"):
print(f"\nAnswer Box ({ab.get('type', 'unknown')}):")
print(f" {ab.get('answer') or ab.get('snippet') or ab.get('title')}")
# 3. Knowledge Graph
if kg := result.get("knowledge_graph"):
print(f"\nKnowledge Graph: {kg['title']}")
print(f" Type: {kg.get('type')}")
print(f" Description: {kg.get('description', 'N/A')[:100]}")
# 4. People Also Ask
for paa in result.get("related_questions", []):
print(f"\nPAA: {paa['question']}")
print(f" Answer: {paa.get('snippet', 'N/A')[:100]}")
# 5. Related Searches
for rs in result.get("related_searches", []):
print(f"Related: {rs['query']}")
Step 2: Paginate Through Results
def paginate_google(query: str, pages: int = 3, num: int = 10):
"""Get multiple pages of results (each page = 1 credit)."""
all_results = []
for page in range(pages):
result = client.search(
engine="google", q=query, num=num,
start=page * num, # Offset parameter
)
organic = result.get("organic_results", [])
if not organic:
break
all_results.extend(organic)
return all_results
results = paginate_google("python web frameworks", pages=3)
print(f"Total results: {len(results)}")
Step 3: Google with Filters
# Time-based filtering
recent = client.search(engine="google", q="AI news", tbs="qdr:w") # Past week
# tbs options: qdr:h (past hour), qdr:d (day), qdr:w (week), qdr:m (month), qdr:y (year)
Search Bing, YouTube, Google Shopping, Google News, and Google Maps with SerpApi.
SerpApi Core Workflow B: Multi-Engine Search
Overview
SerpApi supports 15+ search engines beyond Google. Each engine has its own parameters and result structure. Key engines: YouTube (search_query), Bing (q), Google News, Google Shopping, Google Maps, Walmart, eBay, Apple App Store.
Instructions
Step 1: YouTube Search
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
# YouTube uses search_query (not q)
yt = client.search(engine="youtube", search_query="python asyncio tutorial")
for video in yt.get("video_results", []):
print(f"{video['title']}")
print(f" Channel: {video.get('channel', {}).get('name')}")
print(f" Views: {video.get('views')}, Length: {video.get('length')}")
print(f" Link: {video['link']}")
print(f" Published: {video.get('published_date')}")
Step 2: Bing Search
bing = client.search(engine="bing", q="machine learning frameworks", count=10)
for r in bing.get("organic_results", []):
print(f"{r['position']}. {r['title']}")
print(f" {r['link']}")
# Bing has different snippet structure
print(f" {r.get('snippet', 'N/A')}")
Step 3: Google News
news = client.search(engine="google_news", q="artificial intelligence", gl="us", hl="en")
for article in news.get("news_results", []):
print(f"{article['title']}")
print(f" Source: {article['source']['name']}")
print(f" Date: {article.get('date')}")
print(f" Link: {article['link']}")
# News often has thumbnail
if "thumbnail" in article:
print(f" Image: {article['thumbnail']}")
Step 4: Google Shopping
shopping = client.search(
engine="google_shopping",
q="mechanical keyboard",
gl="us",
hl="en",
)
for product in shopping.get("shopping_results", []):
print(f"{product['title']}")
print(f" Price: {product.get('price')}")
print(f" Source: {product.get('source')}")
print(f" Rating: {product.get('rating')} ({product.get('reviews', 0)} reviews)")
print(f" Link: {product['link']}")
Step 5: Google Maps / Local
maps = client.search(
    engine="google_maps",
    q="pizza restaurants",
    ll="@40.7455096,-74.0083012,14z",  # @latitude,longitude,zoom
    type="search",
)
for place in maps.get("local_results", []):
    print(f"{place['title']} -- rating {place.get('rating')} ({place.get('reviews')} reviews)")
Optimize SerpApi costs by reducing credit consumption and choosing the right plan.
SerpApi Cost Tuning
Overview
SerpApi charges per search (1 credit each). Plans: Free (100/mo), Developer ($75, 5K/mo), Business ($200, 15K/mo), Enterprise (custom). Key savings: caching, archive retrieval (free), and Google Light API.
Cost Strategies
Strategy 1: Aggressive Caching (Biggest Savings)
# Search results rarely change within an hour
# Cache for 1 hour = up to 30x credit reduction for queries repeated every 2 minutes
# Cache for 1 day = up to 720x for that cadence, or 24x for hourly queries
import hashlib, json, redis, serpapi, os
r = redis.Redis.from_url(os.environ["REDIS_URL"])
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
def cached_search(ttl_seconds=3600, **params):
key = f"serpapi:{hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()}"
cached = r.get(key)
if cached:
return json.loads(cached) # FREE: no credit consumed
result = client.search(**params) # 1 credit
r.setex(key, ttl_seconds, json.dumps(dict(result)))
return result
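Usage -- the first call spends a credit; repeats within the TTL are served from Redis:
result = cached_search(ttl_seconds=3600, engine="google", q="python tutorial", num=5)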
Strategy 2: Archive API (Free Retrieval)
# Every search result is stored in the archive
# Retrieve by search_id at no cost
archived = client.search(engine="google", search_id="previous_id")
# 0 credits -- use for re-processing or delayed access
Strategy 3: Google Light API (Same Cost, Faster)
# Same 1 credit but faster response (~1s vs 3-5s)
# Good for: organic results only, no knowledge graph needed
result = client.search(engine="google_light", q="query")
Strategy 4: Reduce num Parameter
# Default num=10 (10 results). If you only need top 3:
result = client.search(engine="google", q="query", num=3)
# Still 1 credit, but faster response
Cost Calculator
def estimate_monthly_cost(
daily_searches: int,
cache_hit_rate: float = 0.7, # 70% cache hits typical
) -> dict:
actual_api_calls = daily_searches * 30 * (1 - cache_hit_rate)
plans = [
("Free", 100, 0), ("Developer", 5000, 75),
("Business", 15000, 200), ("Enterprise", 50000, 500),
]
for name, limit, price in plans:
if actual_api_calls <= limit:
return {"plan": name, "price": f"${price}/mo",
"api_calls": int(actual_api_calls), "raw_searches": daily_searches * 30}
return {"plan": "Enterprise+", "price": "Custom", "api_calls": int(actual_api_calls)}
# Examples:
# 100 searches/day, 70% cache = 900 API calls/mo = Developer ($75)
# 500 searches/day, 80% cache = 3000 API calls/mo = Developer ($75)
# 1000 searches/day, 50% cache = 15000 API calls/mo = Business ($200)
Collect SerpApi debug diagnostics: account status, recent searches, and error logs.
SerpApi Debug Bundle
Overview
Collect diagnostic data for SerpApi issues using the Account API and Searches Archive API. SerpApi stores all search results for retrieval without additional credit charges.
Instructions
Step 1: Collect Diagnostics
#!/bin/bash
BUNDLE="serpapi-debug-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BUNDLE"
KEY="${SERPAPI_API_KEY:?Set SERPAPI_API_KEY}"
# Account status
curl -s "https://serpapi.com/account.json?api_key=$KEY" \
| jq '{plan: .plan_name, used: .this_month_usage, remaining: .plan_searches_left, monthly_limit: .searches_per_month}' \
> "$BUNDLE/account.json"
# Recent searches (last 10)
curl -s "https://serpapi.com/searches.json?api_key=$KEY" \
| jq '.[0:10] | .[] | {id: .id, status: .status, engine: .search_parameters.engine, query: .search_parameters.q, created: .created_at}' \
> "$BUNDLE/recent-searches.json"
# Test search
curl -s "https://serpapi.com/search.json?q=test&engine=google&num=1&api_key=$KEY" \
| jq '.search_metadata' > "$BUNDLE/test-search.json"
# Environment
echo "Node: $(node --version 2>/dev/null || echo N/A)" > "$BUNDLE/env.txt"
echo "Python: $(python3 --version 2>/dev/null || echo N/A)" >> "$BUNDLE/env.txt"
pip show serpapi 2>/dev/null >> "$BUNDLE/env.txt" || true
npm list serpapi 2>/dev/null >> "$BUNDLE/env.txt" || true
tar -czf "$BUNDLE.tar.gz" "$BUNDLE"
echo "Bundle: $BUNDLE.tar.gz"
Step 2: Retrieve Failed Search Details
# Use the Searches Archive API to get details of any past search
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
# Get a specific search by ID (no credit charge)
result = client.search(engine="google", search_id="YOUR_SEARCH_ID")
print(f"Status: {result['search_metadata']['status']}")
if "error" in result:
print(f"Error: {result['error']}")
Error Handling
| Finding | Likely Issue | Action |
| --- | --- | --- |
| remaining: 0 | Credits exhausted | Upgrade plan or wait for monthly reset |
| Test search fails | API key issue | Re-check key at serpapi.com |
| Recent searches show errors | Bad parameters | Check engine-specific param requirements |
Next Steps
For rate limit issues, see serpapi-rate-limits
Deploy SerpApi-powered search features to production platforms.
SerpApi Deploy Integration
Overview
Deploy SerpApi-powered search as a backend API endpoint. Always proxy through your server -- never expose the API key to browsers.
Instructions
Vercel Serverless Function
// api/search.ts
import { getJson } from 'serpapi';
export default async function handler(req: Request) {
const url = new URL(req.url);
const q = url.searchParams.get('q');
if (!q) return new Response('Missing q parameter', { status: 400 });
const engine = url.searchParams.get('engine') || 'google';
const num = parseInt(url.searchParams.get('num') || '5');
const result = await getJson({
engine, q, num,
api_key: process.env.SERPAPI_API_KEY,
});
return Response.json({
results: result.organic_results?.slice(0, num) || [],
answer_box: result.answer_box || null,
total_results: result.search_information?.total_results,
});
}
vercel env add SERPAPI_API_KEY production
vercel --prod
Cloud Run with Python
# main.py
from flask import Flask, request, jsonify
import serpapi, os
app = Flask(__name__)
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
@app.route("/search")
def search():
q = request.args.get("q")
if not q:
return jsonify({"error": "Missing q parameter"}), 400
result = client.search(engine="google", q=q, num=5)
return jsonify({
"results": result.get("organic_results", [])[:5],
"answer_box": result.get("answer_box"),
})
gcloud run deploy search-api \
--source . --region us-central1 \
--set-secrets=SERPAPI_API_KEY=serpapi-key:latest \
--allow-unauthenticated
Health Check
app.get('/health', async (req, res) => {
const account = await fetch(
`https://serpapi.com/account.json?api_key=${process.env.SERPAPI_API_KEY}`
).then(r => r.json());
res.json({
status: account.plan_searches_left > 0 ? 'healthy' : 'credits_exhausted',
remaining: account.plan_searches_left,
});
});
Error Handling
| Issue | Cause | Solution |
| --- | --- | --- |
| Cold start slow | First request initializes the client | Pre-warm with min instances |
| Credits run out | No budget monitoring | Add health check with credit count |
| Key exposed | Frontend calling SerpApi directly | Always proxy through backend |
Run your first SerpApi search -- Google, Bing, or YouTube results as JSON.
SerpApi Hello World
Overview
Run a Google search via SerpApi and parse the structured JSON response. SerpApi returns organic results, knowledge graph, answer boxes, ads, local results, and more -- all as structured data. Key parameter: engine (google, bing, youtube, etc.).
Prerequisites
serpapi package installed (see serpapi-install-auth)
SERPAPI_API_KEY environment variable set
Instructions
Step 1: Basic Google Search (Python)
import serpapi
import os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(
engine="google",
q="best programming languages 2025",
location="Austin, Texas",
hl="en",
gl="us",
num=5, # Number of results
)
# Organic results
for r in result["organic_results"]:
print(f"{r['position']}. {r['title']}")
print(f" {r['link']}")
print(f" {r.get('snippet', 'No snippet')}\n")
# Answer box (if present)
if "answer_box" in result:
print(f"Answer Box: {result['answer_box'].get('answer', result['answer_box'].get('snippet'))}")
Step 2: Google Search (Node.js)
import { getJson } from 'serpapi';
const result = await getJson({
engine: 'google',
q: 'best programming languages 2025',
location: 'Austin, Texas',
hl: 'en',
gl: 'us',
num: 5,
api_key: process.env.SERPAPI_API_KEY,
});
result.organic_results.forEach((r: any) => {
console.log(`${r.position}. ${r.title}`);
console.log(` ${r.link}`);
});
// Knowledge graph
if (result.knowledge_graph) {
console.log(`\nKnowledge Graph: ${result.knowledge_graph.title}`);
}
Step 3: Try Different Engines
# Bing search
bing = client.search(engine="bing", q="Claude AI", count=5)
for r in bing["organic_results"]:
print(f"Bing: {r['title']}")
# YouTube search
youtube = client.search(engine="youtube", search_query="python tutorial")
for v in youtube["video_results"]:
print(f"YouTube: {v['title']} ({v['length']})")
# Google News
news = client.search(engine="google_news", q="artificial intelligence")
for n in news["news_results"]:
print(f"News: {n['title']} - {n['source']['name']}")
Output
1. Python - Best programming language for beginners
https://example.com/python
Python remains the top choice...
2. JavaScript - Most versatile language
https://example.com/js
JavaScript remains the most versatile choice...
Install SerpApi client and configure API key authentication.
SerpApi Install & Auth
Overview
Install the SerpApi client library and configure API key authentication. SerpApi provides structured JSON results from Google, Bing, YouTube, and 15+ search engines. Auth is API-key-based via the api_key parameter or SERPAPI_API_KEY env var.
Prerequisites
A SerpApi account and API key (from serpapi.com/manage-api-key)
Instructions
Step 1: Install Client
# Python (official)
pip install serpapi
# Node.js (official)
npm install serpapi
# Alternative Python package (legacy but widely used)
pip install google-search-results
Step 2: Configure API Key
# .env
SERPAPI_API_KEY=your-api-key-here
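In local development the variable still has to reach the process environment; a common approach (a sketch, assuming the python-dotenv package) is:
from dotenv import load_dotenv
import os
load_dotenv()  # reads .env from the current directory
assert os.environ.get("SERPAPI_API_KEY"), "SERPAPI_API_KEY not set"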
Step 3: Verify Connection (Python)
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(engine="google", q="test", num=1)
print(f"Connected! Search ID: {result['search_metadata']['id']}")
Step 4: Verify Connection (Node.js)
import { getJson } from 'serpapi';
const result = await getJson({
engine: 'google', q: 'test', num: 1,
api_key: process.env.SERPAPI_API_KEY,
});
console.log(`Connected! Search ID: ${result.search_metadata.id}`);
Step 5: Check Account
curl "https://serpapi.com/account.json?api_key=$SERPAPI_API_KEY" | jq '{
plan: .plan_name, used: .this_month_usage, remaining: .plan_searches_left
}'
Output
Connected! Search ID: 64a1b2c3d4e5f6
{ "plan": "Developer", "used": 42, "remaining": 4958 }
Error Handling
| Error | Cause | Solution |
| --- | --- | --- |
| Invalid API key | Wrong or missing key | Check serpapi.com/manage-api-key |
| Your account is disabled | Exceeded limits | Upgrade or wait for monthly reset |
| ModuleNotFoundError | Not installed | pip install serpapi |
Next Steps
Proceed to serpapi-hello-world for your first search.
Configure SerpApi local development with cached responses and test fixtures.
SerpApi Local Dev Loop
Overview
Set up local development for SerpApi with response caching, fixture recording, and offline testing. SerpApi charges per search, so caching results locally is critical for cost-effective development.
Instructions
Step 1: Record Real Responses as Fixtures
import serpapi, json, os, hashlib
def record_fixture(params: dict, fixtures_dir="tests/fixtures"):
"""Run a real search and save the response as a fixture file."""
os.makedirs(fixtures_dir, exist_ok=True)
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(**params)
# Deterministic filename from params
key = hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()[:12]
path = os.path.join(fixtures_dir, f"{params['engine']}_{key}.json")
with open(path, "w") as f:
json.dump(dict(result), f, indent=2)
print(f"Recorded: {path}")
# Record fixtures for common queries
record_fixture({"engine": "google", "q": "python tutorial", "num": 5})
record_fixture({"engine": "youtube", "search_query": "react hooks"})
record_fixture({"engine": "bing", "q": "machine learning"})
Step 2: Mock Client for Testing
import hashlib, json, os
class MockSerpApiClient:
def __init__(self, fixtures_dir="tests/fixtures"):
self.fixtures_dir = fixtures_dir
def search(self, **params):
key = hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()[:12]
path = os.path.join(self.fixtures_dir, f"{params['engine']}_{key}.json")
if os.path.exists(path):
with open(path) as f:
return json.load(f)
raise FileNotFoundError(f"No fixture for {params}. Run record_fixture() first.")
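Usage sketch -- switch between the real and fixture-backed client with an env flag (USE_FIXTURES is a hypothetical name; search params must exactly match those used when recording):
import os, serpapi
if os.environ.get("USE_FIXTURES") == "1":
    client = MockSerpApiClient()  # offline, 0 credits
else:
    client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
result = client.search(engine="google", q="python tutorial", num=5)
print(len(result.get("organic_results", [])))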
Step 3: Vitest Mocking (Node.js)
// tests/serpapi.test.ts
import { describe, it, expect, vi } from 'vitest';
import { readFileSync } from 'fs';
vi.mock('serpapi', () => ({
getJson: vi.fn(async (params) => {
const fixture = JSON.parse(
readFileSync(`tests/fixtures/google_sample.json`, 'utf-8')
);
return fixture;
}),
}));
describe('Search Service', () => {
it('parses organic results', async () => {
const { getJson } = await import('serpapi');
const result = await getJson({ engine: 'google', q: 'test' });
expect(result.organic_results).toBeDefined();
expect(result.organic_results[0]).toHaveProperty('title');
expect(result.organic_results[0]).toHaveProperty('link');
});
});
Optimize SerpApi performance with caching, async searches, and result filtering.
SerpApi Performance Tuning
Overview
SerpApi typical latency: 2-5 seconds per search (real-time scraping). Main optimization: aggressive caching since search results change slowly. Secondary: use Google Light API for faster responses, reduce num parameter, and parallelize independent searches.
Instructions
Step 1: Multi-Layer Caching
import { LRUCache } from 'lru-cache';
import { Redis } from 'ioredis';
import { getJson } from 'serpapi';
// L1: In-memory (fastest, per-instance)
const l1 = new LRUCache<string, any>({ max: 1000, ttl: 600_000 }); // 10 min
// L2: Redis (shared across instances)
const redis = new Redis(process.env.REDIS_URL!);
async function cachedSearch(params: Record<string, any>): Promise<any> {
const key = `serpapi:${JSON.stringify(params)}`;
// L1 check
const l1Hit = l1.get(key);
if (l1Hit) return l1Hit;
// L2 check
const l2Hit = await redis.get(key);
if (l2Hit) {
const parsed = JSON.parse(l2Hit);
l1.set(key, parsed);
return parsed;
}
// Cache miss: real API call
const result = await getJson({ ...params, api_key: process.env.SERPAPI_API_KEY });
l1.set(key, result);
await redis.setex(key, 3600, JSON.stringify(result)); // 1 hour in Redis
return result;
}
Step 2: Google Light API (Faster)
# Google Light API: ~1s instead of 2-5s, limited result fields
result = client.search(engine="google_light", q="fast query", num=5)
# Returns: organic_results with title, link, snippet only
# No knowledge_graph, answer_box, or rich snippets
Step 3: Reduce Response Size
# Only get the fields you need
result = client.search(
engine="google", q="query",
num=5, # Fewer results = faster
no_cache=False, # Use SerpApi's server-side cache (default)
)
# Strip metadata to reduce memory/storage
clean = {
"organic_results": result.get("organic_results", []),
"answer_box": result.get("answer_box"),
"search_id": result["search_metadata"]["id"],
}
Step 4: Parallel Search
import PQueue from 'p-queue';
const queue = new PQueue({ concurrency: 5, interval: 1000, intervalCap: 5 });
async function batchSearch(queries: string[]): Promise<any[]> {
return Promise.all(
queries.map(q =>
queue.add(() => cachedSearch({ engine: 'google', q, num: 5 }))
)
);
}
// 10 queries, 5 parallel, rate limited: ~4 seconds total
const results = await batchSearch(['query1', 'query2', /* ... */]);
Latency Benchmarks
| Method | Typical Latency | Credits |
| --- | --- | --- |
| google (standard) | 2-5s | 1 |
| google_light | ~1s | 1 |
| Cache hit (L1 memory / L2 Redis) | sub-millisecond to a few ms | 0 |
Production readiness checklist for SerpApi integrations.
SerpApi Production Checklist
Checklist
API Key & Authentication
- [ ] API key stored in secret manager (not env files)
- [ ] Backend proxy for all client-side search requests
- [ ] Key not exposed in frontend bundles or logs
- [ ] Usage monitoring configured
Credit Budget
- [ ] Monthly search volume estimated
- [ ] Plan tier matches expected volume
- [ ] Response caching implemented (LRU or Redis)
- [ ] Archive API used for result retrieval (free)
- [ ] Budget alerts set (e.g., 80% threshold; see the sketch below)
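A minimal sketch of the budget-alert item, run daily from cron; it assumes the account fields used elsewhere in this pack (this_month_usage, searches_per_month) and uses print as a stand-in for your alert channel:
import os, serpapi
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
account = client.account()
used, quota = account["this_month_usage"], account["searches_per_month"]
if quota and used / quota >= 0.8:
    # Swap for Slack/PagerDuty/email in production
    print(f"ALERT: SerpApi usage at {used}/{quota} ({used / quota:.0%})")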
Error Handling
- [ ] Check search_metadata.status before using results
- [ ] Handle the error field in responses
- [ ] Retry on 500/timeout (max 2 retries)
- [ ] Graceful fallback when credits exhausted
- [ ] Log search IDs for debugging (search_metadata.id)
Performance
- [ ] Response caching with appropriate TTL
- [ ] Rate limiting per plan tier (see serpapi-rate-limits)
- [ ] Async search for non-critical queries
- [ ] Proxy endpoint rate-limited to prevent abuse
Health Check
app.get('/health', async (req, res) => {
try {
const account = await fetch(
`https://serpapi.com/account.json?api_key=${process.env.SERPAPI_API_KEY}`
).then(r => r.json());
res.json({
status: account.plan_searches_left > 0 ? 'healthy' : 'degraded',
serpapi: {
plan: account.plan_name,
remaining: account.plan_searches_left,
used: account.this_month_usage,
},
});
} catch {
res.status(503).json({ status: 'unhealthy', serpapi: { error: 'unreachable' } });
}
});
Error Handling
| Alert | Condition | Severity |
| --- | --- | --- |
| Credits Low | remaining < 10% | P2 |
| Credits Exhausted | remaining = 0 | P1 |
| API Unreachable | Account check fails | P1 |
| High Error Rate | > 5% searches fail | P2 |
Next Steps
For version upgrades, see serpapi-upgrade-migration.
Handle SerpApi rate limits and credit-based usage quotas.
SerpApi Rate Limits
Overview
SerpApi uses credit-based pricing (each search = 1 credit) plus per-second rate limits. Retrieving cached/archived searches does not consume credits. Plans range from 100 searches/month (free) to 50,000+ (enterprise).
Plan Limits
| Plan | Searches/Month | Rate Limit | Price |
| --- | --- | --- | --- |
| Free | 100 | 1/second | $0 |
| Developer | 5,000 | 5/second | $75/mo |
| Business | 15,000 | 10/second | $200/mo |
| Enterprise | 50,000+ | 15/second | Custom |
Instructions
Step 1: Monitor Credit Usage
import serpapi, os
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
# Check remaining credits before batch operations
account = client.account()
remaining = account["plan_searches_left"]
used = account["this_month_usage"]
print(f"Used: {used}, Remaining: {remaining}")
if remaining < 100:
print("WARNING: Low credits remaining")
Step 2: Request Throttling
import time
from threading import Semaphore
class ThrottledSerpApi:
    def __init__(self, api_key: str, max_per_second: int = 5):
        self.client = serpapi.Client(api_key=api_key)
        self.semaphore = Semaphore(max_per_second)
        self.min_interval = 1.0 / max_per_second
        self.last_request = 0.0
    def search(self, **params) -> dict:
        with self.semaphore:
            # Enforce the minimum interval derived from max_per_second
            elapsed = time.time() - self.last_request
            if elapsed < self.min_interval:
                time.sleep(self.min_interval - elapsed)
            self.last_request = time.time()
            return self.client.search(**params)
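Usage:
import os
api = ThrottledSerpApi(os.environ["SERPAPI_API_KEY"], max_per_second=5)
result = api.search(engine="google", q="throttled query", num=5)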
Step 3: Use Archive to Avoid Credit Waste
# Retrieve a previous search result by ID (FREE, no credit charge)
archived = client.search(engine="google", search_id="previous_search_id")
# Check if a query was recently searched before spending a credit
# Store search IDs in your database keyed by query+params hash
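A minimal sketch of that idea using sqlite3 from the standard library -- the table and file names are illustrative:
import hashlib, json, os, sqlite3, serpapi
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
db = sqlite3.connect("serpapi_ids.db")
db.execute("CREATE TABLE IF NOT EXISTS searches (params_hash TEXT PRIMARY KEY, search_id TEXT)")
def search_with_archive(**params):
    h = hashlib.md5(json.dumps(params, sort_keys=True).encode()).hexdigest()
    row = db.execute("SELECT search_id FROM searches WHERE params_hash = ?", (h,)).fetchone()
    if row:
        return client.search(engine=params.get("engine", "google"), search_id=row[0])  # 0 credits
    result = client.search(**params)  # 1 credit
    db.execute("INSERT OR REPLACE INTO searches VALUES (?, ?)", (h, result["search_metadata"]["id"]))
    db.commit()
    return result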
Step 4: Node.js Rate Limiter
import PQueue from 'p-queue';
import { getJson } from 'serpapi';
const queue = new PQueue({
concurrency: 3, // Max parallel requests
interval: 1000, // Per second
intervalCap: 5, // Max 5 per second
});
async function throttledSearch(params: Record<string, any>) {
return queue.add(() => getJson({
...params,
api_key: process.env.SERPAPI_API_KEY,
}));
}
// Batch search with automatic throttling
const queries = ['query1', 'query2', 'query3'];
const results = await Promise.all(queries.map(q => throttledSearch({ engine: 'google', q, num: 5 })));
Production architecture for SerpApi search services with caching, monitoring, and multi-engine support.
SerpApi Reference Architecture
Overview
Production architecture for search-powered applications using SerpApi. Core components: cached search service, multi-engine abstraction, SERP monitoring pipeline, and credit budget management.
Architecture Diagram
┌──────────────────────────────────┐
│ API Layer │
│ /search /track /health │
├──────────────────────────────────┤
│ Search Service │
│ Multi-engine Caching Parsing │
├──────────────────────────────────┤
│ SerpApi Client │
│ Rate Limiting Retry Archive │
├──────────────────────────────────┤
│ Infrastructure │
│ Redis Cache PostgreSQL Cron │
└──────────────────────────────────┘
│
▼
┌──────────────────────────────────┐
│ SerpApi REST API │
│ google youtube bing news │
│ 1 credit/search, 100-50K/mo │
└──────────────────────────────────┘
Project Structure
search-service/
├── src/
│ ├── serpapi/
│ │ ├── client.ts # Cached search with rate limiting
│ │ ├── engines.ts # Engine-specific param mapping
│ │ └── types.ts # Typed result interfaces
│ ├── services/
│ │ ├── search.ts # Multi-engine search facade
│ │ ├── tracking.ts # Keyword rank tracking
│ │ └── credits.ts # Usage monitoring
│ ├── api/
│ │ ├── search.ts # /search proxy endpoint
│ │ └── health.ts # /health with credit check
│ └── jobs/
│ └── rank-tracker.ts # Daily keyword monitoring
├── tests/
│ ├── fixtures/ # Recorded SerpApi responses
│ └── search.test.ts # Fixture-based tests
└── config/
Key Components
Search Service Facade
class SearchService {
constructor(private client: CachedSerpApiClient, private db: Database) {}
async search(query: string, options?: { engine?: string; num?: number }) {
const engine = options?.engine || 'google';
const result = await this.client.cachedSearch({
engine, q: query, num: options?.num || 5,
});
// Normalize across engines
return {
results: result.organic_results || result.video_results || [],
answer_box: result.answer_box || null,
knowledge_graph: result.knowledge_graph || null,
search_id: result.search_metadata.id,
cached: result._cached || false,
};
}
async trackKeyword(keyword: string, domain: string) {
const result = await this.client.cachedSearch({
engine: 'google', q: keyword, num: 100,
});
const position = result.organic_results?.findIndex(
(r: any) => r.link?.includes(domain)
);
await this.db.saveRanking(keyword, domain, position >= 0 ? position + 1 : null);
}
}
Credit Budget Manager
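The original listing is truncated here; below is a minimal Python sketch of the idea -- a self-imposed daily budget that blocks searches once spent (persist the counter in Redis/Postgres in production):
import serpapi
class CreditBudgetManager:
    def __init__(self, client: serpapi.Client, daily_budget: int = 100):
        self.client = client
        self.daily_budget = daily_budget
        self.spent_today = 0  # in-memory for illustration; reset via a daily job
    def search(self, **params) -> dict:
        if self.spent_today >= self.daily_budget:
            raise RuntimeError("Daily SerpApi credit budget exhausted")
        result = self.client.search(**params)  # 1 credit
        self.spent_today += 1
        return result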
Production-ready SerpApi client patterns with caching, typing, and multi-engine support.
SerpApi SDK Patterns
Overview
Production patterns for SerpApi: typed result interfaces, response caching (critical since each search costs credits), multi-engine abstraction, and async search with the Searches Archive API.
Instructions
Step 1: Typed Result Interfaces
interface SerpApiOrganicResult {
position: number;
title: string;
link: string;
snippet: string;
displayed_link: string;
source?: string;
}
interface SerpApiSearchResult {
search_metadata: { id: string; status: string; created_at: string };
search_parameters: Record<string, string>;
organic_results: SerpApiOrganicResult[];
answer_box?: { answer?: string; snippet?: string; title?: string };
knowledge_graph?: { title: string; description?: string; type?: string };
related_questions?: Array<{ question: string; snippet: string }>;
pagination?: { next: string };
}
Step 2: Cached Search Client
import { getJson } from 'serpapi';
import { LRUCache } from 'lru-cache';
const cache = new LRUCache<string, SerpApiSearchResult>({
max: 500,
ttl: 3600_000, // 1 hour -- search results are relatively stable
});
async function cachedSearch(params: Record<string, any>): Promise<SerpApiSearchResult> {
const key = JSON.stringify(params);
const cached = cache.get(key);
if (cached) return cached;
const result = await getJson({
...params,
api_key: process.env.SERPAPI_API_KEY,
}) as SerpApiSearchResult;
cache.set(key, result);
return result;
}
Step 3: Multi-Engine Search Abstraction
import serpapi, os
class SearchService:
ENGINES = {
"web": {"engine": "google", "query_param": "q"},
"news": {"engine": "google_news", "query_param": "q"},
"images": {"engine": "google_images", "query_param": "q"},
"youtube": {"engine": "youtube", "query_param": "search_query"},
"bing": {"engine": "bing", "query_param": "q"},
"shopping": {"engine": "google_shopping", "query_param": "q"},
}
def __init__(self):
self.client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
def search(self, query: str, engine: str = "web", **kwargs) -> dict:
config = self.ENGINES[engine]
params = {
"engine": config["engine"],
config["query_param"]: query,
**kwargs,
}
return self.client.search(**params)
# Usage
svc = SearchService()
web = svc.search("Claude AI")
Secure SerpApi API keys and prevent credit abuse.
SerpApi Security Basics
Overview
SerpApi uses a single API key for authentication. The key grants full account access -- there are no scoped keys or OAuth. Protect it like a credit card: never expose in frontend code, always proxy through your backend.
Instructions
Step 1: Never Expose API Key in Frontend
// BAD: API key in browser-side code
const result = await fetch(`https://serpapi.com/search.json?q=${query}&api_key=YOUR_KEY`);
// GOOD: Proxy through your backend
// Frontend
const result = await fetch(`/api/search?q=${encodeURIComponent(query)}`);
// Backend (api/search.ts)
export async function GET(req: Request) {
const url = new URL(req.url);
const q = url.searchParams.get('q');
const result = await getJson({
engine: 'google', q,
api_key: process.env.SERPAPI_API_KEY, // Server-side only
});
return Response.json(result.organic_results);
}
Step 2: Secure Storage
# .gitignore
.env
.env.local
# Use platform secret managers in production
gh secret set SERPAPI_API_KEY # GitHub Actions
vercel env add SERPAPI_API_KEY # Vercel
fly secrets set SERPAPI_API_KEY=x # Fly.io
Step 3: Rate Limit Your Proxy
// Prevent abuse of your search proxy endpoint
import rateLimit from 'express-rate-limit';
const searchLimiter = rateLimit({
windowMs: 60_000, // 1 minute
max: 10, // 10 searches per minute per IP
message: 'Too many searches, try again later',
});
app.get('/api/search', searchLimiter, searchHandler);
Step 4: Monitor Usage
# Set up daily usage check
curl -s "https://serpapi.com/account.json?api_key=$SERPAPI_API_KEY" \
| jq '{used: .this_month_usage, remaining: .plan_searches_left}'
# Alert if usage is unexpectedly high
Security Checklist
- [ ] API key in environment variables only
- [ ] .env in .gitignore
- [ ] Backend proxy for all search requests
- [ ] Rate limiting on proxy endpoints
- [ ] Usage monitoring and alerts
- [ ] Separate keys for dev/prod (if available)
Next Steps
For production deployment, see serpapi-prod-checklist.
Migrate between SerpApi client versions and handle package changes.
SerpApi Upgrade & Migration
Overview
The main migration path: google-search-results (legacy) to serpapi (current official package). The API itself is stable -- changes are in client library interfaces, not the REST API.
Instructions
Python: google-search-results to serpapi
# BEFORE: Legacy package
from serpapi import GoogleSearch
search = GoogleSearch({"q": "test", "api_key": key})
result = search.get_dict()
# AFTER: New official package
import serpapi
client = serpapi.Client(api_key=key)
result = client.search(engine="google", q="test")
# Result is already a dict -- no get_dict() needed
# Migration steps
pip uninstall google-search-results
pip install serpapi
# Update imports across codebase
# OLD: from serpapi import GoogleSearch
# NEW: import serpapi
Node.js: google-search-results-nodejs to serpapi
// BEFORE: Legacy
import { GoogleSearch } from 'google-search-results-nodejs';
const search = new GoogleSearch('api_key');
search.json({ q: 'test', engine: 'google' }, (result) => { ... });
// AFTER: Current (Promise-based)
import { getJson } from 'serpapi';
const result = await getJson({ engine: 'google', q: 'test', api_key: key });
// No callbacks -- uses Promises natively
Key Changes
| Aspect | Legacy | Current |
| --- | --- | --- |
| Python import | from serpapi import GoogleSearch | import serpapi |
| Python init | GoogleSearch(params_dict) | serpapi.Client(api_key=key) |
| Python search | search.get_dict() | client.search(engine="google", q=...) |
| Node import | google-search-results-nodejs | serpapi |
| Node pattern | Callback-based | Promise/async-await |
| Engine param | Via class name (GoogleSearch, BingSearch) | Via engine parameter |
Migration Checklist
- [ ] Replace package: pip install serpapi / npm install serpapi
- [ ] Update all imports
- [ ] Replace class-per-engine with engine parameter
- [ ] Replace callbacks with async/await (Node.js)
- [ ] Remove .get_dict() calls (Python -- result is already a dict)
- [ ] Test all search queries return expected structure
- [ ] Update CI dependencies
Implement SerpApi async search callbacks and scheduled search monitoring.
SerpApi Webhooks & Events
Overview
SerpApi does not have traditional webhooks, but supports async searches and the Searches Archive API. Build SERP monitoring by combining scheduled searches with change detection. Common use case: track keyword rankings over time.
Instructions
Step 1: Async Search with Polling
import serpapi, os, time
client = serpapi.Client(api_key=os.environ["SERPAPI_API_KEY"])
# Submit async search (returns immediately)
result = client.search(engine="google", q="your keyword", async_search=True)
search_id = result["search_metadata"]["id"]
print(f"Submitted: {search_id}")
# Poll for completion
while True:
archived = client.search(engine="google", search_id=search_id)
status = archived["search_metadata"]["status"]
if status == "Success":
break
elif status == "Error":
raise Exception(f"Search failed: {archived.get('error')}")
time.sleep(2)
print(f"Results: {len(archived.get('organic_results', []))}")
Step 2: SERP Monitoring Pipeline
import json, hashlib
from datetime import datetime
class SerpMonitor:
def __init__(self, client, db):
self.client = client
self.db = db
def track_keyword(self, keyword: str, domain: str):
"""Track a domain's ranking position for a keyword."""
result = self.client.search(engine="google", q=keyword, num=100)
organic = result.get("organic_results", [])
position = None
for r in organic:
if domain in r.get("link", ""):
position = r["position"]
break
self.db.insert({
"keyword": keyword,
"domain": domain,
"position": position, # None if not in top 100
"total_results": result.get("search_information", {}).get("total_results"),
"checked_at": datetime.utcnow().isoformat(),
"search_id": result["search_metadata"]["id"],
})
return position
def detect_changes(self, keyword: str, domain: str):
"""Compare current vs previous ranking."""
current = self.track_keyword(keyword, domain)
previous = self.db.get_previous_position(keyword, domain)
if previous and current:
change = previous - current # Positive = improved
if abs(change) >= 3:
self.notify(f"Ranking change for '{keyword}': {previous} -> {current} ({'+' if change > 0 else ''}{change})")
Step 3: Schedule Recurring Checks
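The original step is truncated; a minimal scheduling sketch, assuming the schedule package (cron or APScheduler work equally well) and the client/db objects from SerpMonitor's constructor in Step 2 -- keywords and domain are illustrative:
import schedule, time
monitor = SerpMonitor(client, db)
def run_checks():
    for keyword in ["project management tools", "crm software"]:
        monitor.detect_changes(keyword, "example.com")
schedule.every().day.at("06:00").do(run_checks)
while True:
    schedule.run_pending()
    time.sleep(60)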