Capturing website screenshots in Python is a common need — link previews, automated visual testing, archiving, monitoring dashboards. The traditional approach means installing Selenium or Playwright with a full browser runtime. This tutorial shows you a much simpler path using the URLSnap REST API.
You'll learn how to capture basic and customized screenshots, run batches concurrently, serve live previews from Django, and handle errors with retries. The only dependency is the requests library — which you probably already have:
pip install requests
No browsers, no WebDriver binaries, no system dependencies. The API handles the Chromium instance server-side.
import requests

# Register an email address to receive an API key.
response = requests.post(
    'https://urlsnap.dev/api/register',
    json={'email': 'you@example.com'},
)
payload = response.json()
print(payload['key'])  # us_abc123...
# Free tier: 20 screenshots/day
import requests

API_KEY = 'us_your_key_here'

def screenshot_url(url: str, output_path: str):
    """Capture a full-page PNG screenshot of *url* and save it to *output_path*.

    Raises requests.HTTPError (via raise_for_status) on a non-2xx API response.
    """
    response = requests.get(
        'https://urlsnap.dev/api/screenshot',
        params={'url': url, 'format': 'png', 'full_page': 'true'},
        headers={'x-api-key': API_KEY},
        # Fix: the original had no timeout, so a stalled connection would
        # block the caller forever. 45s matches the other snippets here.
        timeout=45,
    )
    response.raise_for_status()
    # The API returns the raw image bytes as the response body.
    with open(output_path, 'wb') as f:
        f.write(response.content)
    print(f'Saved screenshot to {output_path}')

screenshot_url('https://example.com', 'example.png')
| Parameter | Type | Default | Description |
|---|---|---|---|
| url | string | required | Target URL to screenshot |
| width | integer | 1280 | Viewport width in pixels |
| height | integer | 800 | Viewport height in pixels |
| format | string | png | png or jpeg |
| full_page | bool | false | Capture full scrollable page |
| delay | integer | 0 | Wait ms after load (max 5000) |
import requests

API_KEY = 'us_your_key_here'

def screenshot_url(url: str, output_path: str, **kwargs):
    """Capture a screenshot of *url* and write the image bytes to *output_path*.

    Extra keyword arguments are forwarded verbatim as API query parameters,
    e.g. width=375, height=812, format='jpeg', full_page='true', delay=1000.
    """
    # Merge the target URL with any caller-supplied query parameters.
    query = dict(kwargs, url=url)
    response = requests.get(
        'https://urlsnap.dev/api/screenshot',
        headers={'x-api-key': API_KEY},
        params=query,
        timeout=45,
    )
    response.raise_for_status()
    with open(output_path, 'wb') as out:
        out.write(response.content)
# Capture the same page at several viewport/format presets.
presets = [
    # Mobile viewport (iPhone 14 size)
    ('mobile.png', {'width': 390, 'height': 844}),
    # Full-page JPEG with delay for lazy-loaded images
    ('full.jpg', {'format': 'jpeg', 'full_page': 'true', 'delay': 2000}),
    # Desktop high-resolution (2560x1440) screenshot
    ('hd.png', {'width': 2560, 'height': 1440}),
]
for filename, options in presets:
    screenshot_url('https://example.com', filename, **options)
Need to screenshot many URLs quickly? Use httpx with asyncio to run them concurrently:
import asyncio
import httpx
API_KEY = 'us_your_key_here'

async def screenshot_async(client: httpx.AsyncClient, url: str) -> bytes:
    """Fetch a 1280px-wide PNG screenshot of *url*; return the raw image bytes."""
    response = await client.get(
        'https://urlsnap.dev/api/screenshot',
        headers={'x-api-key': API_KEY},
        params={'url': url, 'format': 'png', 'width': '1280'},
        timeout=45.0,
    )
    response.raise_for_status()
    return response.content
async def batch_screenshot(urls: list[str]) -> list[bytes]:
    """Screenshot every URL concurrently; results preserve input order."""
    # One shared client keeps connection pooling across all requests.
    async with httpx.AsyncClient() as client:
        jobs = (screenshot_async(client, target) for target in urls)
        return await asyncio.gather(*jobs)
# Screenshot 5 URLs concurrently
targets = [
    'https://example.com',
    'https://github.com',
    'https://news.ycombinator.com',
    'https://python.org',
    'https://pypi.org',
]
images = asyncio.run(batch_screenshot(targets))
for index, image in enumerate(images):
    with open(f'screenshot_{index}.png', 'wb') as handle:
        handle.write(image)
    print(f'screenshot_{index}.png — {len(image)//1024}KB')
import requests
from django.http import HttpResponse
from django.views import View

class LinkPreviewView(View):
    """Generate a live screenshot preview for any URL."""

    API_KEY = 'us_your_key_here'

    def get(self, request):
        # ?url=... is required; bail out early with a 400 otherwise.
        target = request.GET.get('url')
        if not target:
            return HttpResponse('url parameter required', status=400)
        # 1200x630 matches the standard Open Graph preview dimensions.
        upstream = requests.get(
            'https://urlsnap.dev/api/screenshot',
            params={'url': target, 'width': '1200', 'height': '630'},
            headers={'x-api-key': self.API_KEY},
            timeout=45,
        )
        # Surface quota exhaustion to the caller rather than a generic 500.
        if upstream.status_code == 429:
            return HttpResponse('Daily limit reached', status=429)
        upstream.raise_for_status()
        return HttpResponse(upstream.content, content_type='image/png')
import time
import requests

API_KEY = 'us_your_key_here'

def screenshot_with_retry(url: str, output_path: str, retries: int = 2) -> bool:
    """Screenshot *url* to *output_path*, retrying transient failures.

    Retries up to *retries* times (2s pause between attempts) on timeouts
    and dropped connections — the failure modes that are usually transient.
    Quota (429) and auth (401) errors are reported and never retried.
    Returns True on success, False otherwise.
    """
    for attempt in range(retries + 1):
        try:
            resp = requests.get(
                'https://urlsnap.dev/api/screenshot',
                params={'url': url, 'full_page': 'true'},
                headers={'x-api-key': API_KEY},
                timeout=45,
            )
            if resp.status_code == 429:
                print('Daily limit reached. Upgrade at urlsnap.dev/#pricing')
                return False
            if resp.status_code == 401:
                print('Invalid API key.')
                return False
            resp.raise_for_status()
            with open(output_path, 'wb') as f:
                f.write(resp.content)
            return True
        # Fix: also retry ConnectionError — a dropped connection is just as
        # transient as a timeout, but the original fell through to the
        # generic handler below and gave up on the first attempt.
        except (requests.exceptions.Timeout,
                requests.exceptions.ConnectionError):
            if attempt < retries:
                print(f'Timeout, retrying ({attempt + 1}/{retries})...')
                time.sleep(2)
            else:
                print(f'Failed after {retries} retries: timeout')
                return False
        except requests.exceptions.RequestException as e:
            # Non-transient errors (4xx/5xx raised above, bad URL, ...).
            print(f'Error: {e}')
            return False
    return False
ok = screenshot_with_retry('https://example.com', 'out.png')
print('Done!' if ok else 'Failed.')
import requests

# Check the plan and usage counters for the current key.
response = requests.get(
    'https://urlsnap.dev/api/me',
    headers={'x-api-key': 'us_your_key_here'},
)
info = response.json()
print(f"Plan: {info['plan']}")
print(f"Used today: {info['requests_today']}/{info['daily_limit']}")
print(f"All-time: {info['requests_total']}")
| | URLSnap API | Selenium | Playwright |
|---|---|---|---|
| Setup time | ~1 minute | ~30 min | ~15 min |
| Dependencies | requests only | ChromeDriver + browser | browser binaries |
| Works on serverless | ✅ | ❌ | ❌ (usually) |
| Managed updates | ✅ | ❌ | ❌ |
| Best for | Fast integration, cloud apps | Browser automation & testing | E2E testing, local scripts |
Free tier: 20 screenshots/day. No credit card, no browser install.
Get your free API key →