Trino is a distributed SQL query engine for data lakes that runs federated queries across many data sources. Python: import trino.dbapi and trino.auth; conn = trino.dbapi.connect(host="trino-host", port=443, user="user", auth=trino.auth.JWTAuthentication(TOKEN), catalog="iceberg", schema="analytics"). cursor = conn.cursor(); cursor.execute("SELECT * FROM orders WHERE dt >= current_date - INTERVAL '7' DAY"); cursor.fetchall() returns rows. Catalogs define connectors: the iceberg catalog queries Iceberg tables on S3, hive queries Hive metastore tables, and postgresql queries Postgres directly. Federated query: SELECT o.order_id, u.email FROM iceberg.analytics.orders o JOIN postgresql.public.users u ON o.user_id = u.id joins the lake and OLTP in one SQL statement. JDBC: jdbc:trino://host:443/iceberg/analytics?SSL=true&user=user&password=TOKEN. REST API: POST /v1/statement with the SQL text as the request body returns { id, nextUri, data, columns, stats }; follow nextUri until it is absent and stats.state reaches FINISHED or FAILED, authenticating with an Authorization header (Bearer JWT or Basic credentials). Cost-based optimizer (CBO): ANALYZE table collects statistics for better query plans. Partition pruning: filtering on partition columns (dt, region) skips data files. EXPLAIN (FORMAT JSON) shows the query plan, including join strategy and pushdowns. Session properties: hash_partition_count=64; join_distribution_type=BROADCAST for small dimension tables. USE catalog.schema sets the default. SHOW CATALOGS, SHOW SCHEMAS FROM iceberg, SHOW TABLES FROM iceberg.analytics. Claude Code generates Trino Python clients, federated SQL queries, REST API polling clients, and catalog configurations.
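Catalog discovery works through the same DBAPI cursor via the SHOW statements. A minimal sketch, assuming the placeholder host and token from above:

# explore_catalogs.py — hypothetical sketch: walk catalogs and schemas via SHOW
import trino.dbapi
import trino.auth

conn = trino.dbapi.connect(
    host="trino-host",  # placeholder coordinator host
    port=443,
    user="user",
    auth=trino.auth.JWTAuthentication("TOKEN"),  # placeholder token
    http_scheme="https",
)
cur = conn.cursor()

cur.execute("SHOW CATALOGS")
for (catalog,) in cur.fetchall():
    cur.execute(f"SHOW SCHEMAS FROM {catalog}")
    for (schema,) in cur.fetchall():
        print(f"{catalog}.{schema}")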
CLAUDE.md for Trino
## Trino Stack
- Python: trino (PyPI) >= 0.328 — trino.dbapi.connect(host, port=443, auth=JWTAuthentication(TOKEN))
- Auth: JWTAuthentication(TOKEN) or BasicAuthentication(user, password) or KerberosAuthentication
- Catalog: specify in connect() or USE catalog.schema in SQL
- Federated: SELECT from iceberg.db.table JOIN postgresql.public.table — one SQL, multiple sources
- REST: POST /v1/statement → poll nextUri until state == "FINISHED" or "FAILED"
- EXPLAIN: EXPLAIN (FORMAT JSON) SELECT ... — analyze query plan for optimization
Trino Python Client
# lib/trino/client.py — Trino query client
import os
import json
from typing import Any, Optional
import trino.dbapi
import trino.auth
import pandas as pd
def get_connection(
catalog: str = "iceberg",
schema: str = "analytics",
) -> trino.dbapi.Connection:
"""Create a Trino connection using environment config."""
host = os.environ["TRINO_HOST"]
port = int(os.environ.get("TRINO_PORT", "443"))
user = os.environ.get("TRINO_USER", "query-service")
token = os.environ.get("TRINO_TOKEN")
    if token:
        auth = trino.auth.JWTAuthentication(token)
    else:
        auth = trino.auth.BasicAuthentication(
            username=user,
            password=os.environ.get("TRINO_PASSWORD", ""),
        )
return trino.dbapi.connect(
host=host,
port=port,
user=user,
auth=auth,
catalog=catalog,
schema=schema,
http_scheme="https" if port == 443 else "http",
session_properties={
"query_max_execution_time": "10m",
"join_distribution_type": "AUTOMATIC",
},
)
def query_to_df(
sql: str,
catalog: str = "iceberg",
schema: str = "analytics",
params: Optional[list] = None,
) -> pd.DataFrame:
"""Execute SQL and return a DataFrame."""
conn = get_connection(catalog=catalog, schema=schema)
cur = conn.cursor()
cur.execute(sql, params)
rows = cur.fetchall()
cols = [desc[0] for desc in cur.description] if cur.description else []
return pd.DataFrame(rows, columns=cols)
def query_rows(
sql: str,
catalog: str = "iceberg",
schema: str = "analytics",
) -> list[dict[str, Any]]:
"""Execute SQL and return list of dicts."""
df = query_to_df(sql, catalog=catalog, schema=schema)
return df.to_dict("records")
def explain_query(sql: str) -> dict:
"""Get Trino query plan as JSON for optimization analysis."""
conn = get_connection()
cur = conn.cursor()
cur.execute(f"EXPLAIN (FORMAT JSON) {sql}")
row = cur.fetchone()
import json
return json.loads(row[0]) if row else {}
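A short usage sketch for these helpers; the import path assumes the lib/trino package layout named in the file comments, with the TRINO_* environment variables set as above:

# example_usage.py — hypothetical driver for the client helpers
from lib.trino.client import query_to_df, explain_query  # path is an assumption

# DataFrame of the last week of orders; the dt filter enables partition pruning
df = query_to_df(
    "SELECT order_id, amount, status FROM orders "
    "WHERE dt >= current_date - INTERVAL '7' DAY"
)
print(df.head())

# JSON plan for a quick look at pushdowns and join strategy
plan = explain_query("SELECT count(*) FROM orders WHERE status = 'completed'")
print(plan)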
Federated Query Patterns
# lib/trino/queries.py — common federated query patterns
from .client import query_rows, query_to_df
from datetime import date
def join_lake_and_oltp(start_date: date, end_date: date):
"""Join Iceberg data lake orders with Postgres OLTP users."""
sql = f"""
SELECT
o.order_id,
o.amount,
o.status,
o.created_at,
u.email,
u.plan,
u.country
FROM iceberg.analytics.orders o
JOIN postgresql.public.users u
ON o.user_id = u.id
WHERE
o.dt >= DATE '{start_date.isoformat()}' -- partition pruning
AND o.dt <= DATE '{end_date.isoformat()}'
AND o.status = 'completed'
ORDER BY o.created_at DESC
LIMIT 10000
"""
return query_rows(sql, catalog="iceberg", schema="analytics")
def revenue_by_country_and_plan(lookback_days: int = 30):
"""Cross-catalog aggregation with GROUP BY rollup."""
sql = f"""
WITH order_data AS (
SELECT
o.user_id,
o.amount,
o.created_at
FROM iceberg.analytics.orders o
WHERE
o.dt >= DATE_ADD('day', -{lookback_days}, current_date)
AND o.status = 'completed'
)
SELECT
u.country,
u.plan,
count(*) AS order_count,
sum(od.amount) AS total_revenue,
avg(od.amount) AS avg_order_value,
count(DISTINCT od.user_id) AS unique_buyers
FROM order_data od
JOIN postgresql.public.users u
ON od.user_id = u.id
GROUP BY GROUPING SETS (
(u.country, u.plan),
(u.country),
(u.plan),
()
)
ORDER BY total_revenue DESC NULLS LAST
"""
return query_to_df(sql, catalog="iceberg")
def incremental_stats_refresh(as_of_date: date):
"""Read from Kafka connector for near-realtime event stats."""
sql = f"""
SELECT
json_extract_scalar("_message", '$.event_type') AS event_type,
count(*) AS event_count,
date_trunc('hour', from_unixtime(
CAST(json_extract_scalar("_message", '$.timestamp') AS BIGINT) / 1000
)) AS event_hour
FROM kafka.default.app_events
WHERE _timestamp >= TIMESTAMP '{as_of_date} 00:00:00'
GROUP BY 3, 1
ORDER BY 3 DESC
LIMIT 500
"""
return query_rows(sql, catalog="kafka", schema="default")
Trino REST API Client (TypeScript)
// lib/trino/rest-client.ts — Trino REST API polling client
const TRINO_HOST = process.env.TRINO_HOST!
const TRINO_TOKEN = process.env.TRINO_TOKEN!
const TRINO_USER = process.env.TRINO_USER ?? "query-service"
type TrinoQueryState = "QUEUED" | "PLANNING" | "STARTING" | "RUNNING" | "FINISHED" | "FAILED" | "CANCELED"
type TrinoResponse = {
id: string
nextUri?: string
data?: unknown[][]
columns?: Array<{ name: string; type: string }>
stats: { state: TrinoQueryState; totalRows?: number }
error?: { message: string; errorCode: number }
}
const headers = {
"Authorization": `Bearer ${TRINO_TOKEN}`,
"X-Trino-User": TRINO_USER,
"X-Trino-Catalog": "iceberg",
"X-Trino-Schema": "analytics",
"X-Trino-Time-Zone": "UTC",
"Content-Type": "application/json",
}
export async function runTrinoQuery(sql: string): Promise<Array<Record<string, unknown>>> {
// Submit query
const submitRes = await fetch(`https://${TRINO_HOST}/v1/statement`, {
method: "POST",
headers,
body: sql,
})
if (!submitRes.ok) throw new Error(`Trino submit error ${submitRes.status}: ${await submitRes.text()}`)
let response: TrinoResponse = await submitRes.json()
const allRows: unknown[][] = []
let columns: Array<{ name: string }> | undefined
// Poll until finished
while (response.nextUri || response.stats.state === "RUNNING") {
if (response.data) allRows.push(...response.data)
if (response.columns) columns = response.columns
if (response.error) throw new Error(`Trino query error: ${response.error.message}`)
if (!response.nextUri) break
await new Promise((r) => setTimeout(r, 500))
const pollRes = await fetch(response.nextUri, { headers })
if (!pollRes.ok) throw new Error(`Trino poll error: ${pollRes.status}`)
response = await pollRes.json()
}
if (response.data) allRows.push(...response.data)
if (response.columns) columns = response.columns
if (response.error) throw new Error(`Trino failed: ${response.error.message}`)
// Map rows to objects
if (!columns) return []
return allRows.map((row) =>
Object.fromEntries(columns!.map((col, i) => [col.name, row[i]]))
)
}
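The statement protocol also supports cancellation: sending an HTTP DELETE to the most recent nextUri asks the coordinator to cancel the query. A minimal Python sketch with requests, reusing the same placeholder host and token:

# cancel_query.py — submit a query, then cancel it via the REST protocol
import os
import requests

host = os.environ["TRINO_HOST"]
headers = {
    "Authorization": f"Bearer {os.environ['TRINO_TOKEN']}",
    "X-Trino-User": "query-service",
}

resp = requests.post(
    f"https://{host}/v1/statement",
    data="SELECT count(*) FROM iceberg.analytics.orders",
    headers=headers,
)
resp.raise_for_status()
body = resp.json()

# DELETE on the current nextUri cancels the query on the coordinator
if body.get("nextUri"):
    requests.delete(body["nextUri"], headers=headers).raise_for_status()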
For the Presto alternative when operating in a Meta/Facebook environment or standardizing on PrestoDB: Trino (formerly PrestoSQL) was forked from the original Presto project by Presto's creators, ships frequent releases, and is the de facto community version, while PrestoDB is the original line that stayed at Facebook and now sits under the Presto Foundation. Amazon Athena (engine version 3) is built on Trino, giving serverless federated queries over S3 without cluster management. For the Apache Spark alternative when you need batch processing with large shuffles, MLlib for machine learning, Structured Streaming for micro-batch workloads, or a cluster managed by Databricks or EMR: Spark is the standard for complex multi-step data pipelines, while Trino is optimized for interactive ad-hoc SQL analytics across heterogeneous data sources with sub-second to sub-minute latency. The Claude Skills 360 bundle includes Trino skill sets covering the Python client, federated queries, REST API polling, and catalog configuration. Start with the free tier to try distributed SQL generation.