VictoriaMetrics is a high-performance, cost-efficient time series database — drop-in Prometheus replacement with 10x better compression. Remote write from Prometheus or prom-client: POST https://victoriametrics:8428/api/v1/write with Prometheus protobuf format. prom-client remote write: new PrometheusRemoteWrite({ url: VM_URL, pushInterval: 15000 }). MetricsQL extends PromQL: increase_pure() (no extrapolation), rate_over_sum(), rollup_scrape_interval(), with() template clauses, union() to merge series. Instant query: GET /api/v1/query?query=rate(http_requests_total[5m])&time=1718000000. Range query: GET /api/v1/query_range?query=...&start=...&end=...&step=60s. Cluster: vminsert (write, shards by metric name), vmselect (query, merges from vmstorage), vmstorage (storage nodes). vmagent scrapes targets and forwards to vminsert — lighter than Prometheus, supports relabeling with metric_relabel_configs. Downsampling: retentionPeriod=1y with -downsampling.period=30d:1m,1y:5m — keep 1m resolution for 30d, 5m for 1y. VictoriaLogs: POST /insert/jsonline?_stream_fields=host,app&_time_field=timestamp ingests logs; GET /select/logsql/query?query=_stream:{app="api"} | json_fields | stats count() by status queries. vmctl migrates from Prometheus: vmctl prometheus --prom-snapshot=/var/prometheus/snapshots/.... Claude Code generates VictoriaMetrics remote write configs, MetricsQL queries, vmagent scrape configs, and prom-client integrations.
CLAUDE.md for VictoriaMetrics
## VictoriaMetrics Stack
- Single-node: docker run -p 8428:8428 victoriametrics/victoria-metrics -retentionPeriod=90d
- Remote write: POST /api/v1/write (Prometheus protobuf) from prom-client or Prometheus
- Query API: /api/v1/query and /api/v1/query_range — compatible with Grafana Prometheus datasource
- MetricsQL: superset of PromQL — use increase_pure(), rollup(), with() templates
- vmagent: drop-in Prometheus scraper with remote_write to VM — lighter, supports multi-target
- Cluster: vminsert:8480/insert/0/prometheus/api/v1/write, vmselect:8481/select/0/prometheus
- Retention: -retentionPeriod=6m, downsampling: -downsampling.period=30d:1m,6m:5m
Remote Write Client
// lib/victoriametrics/remote-write.ts — push metrics via remote write protocol
import { Registry } from "prom-client"
/**
 * Push every metric registered in `register` to VictoriaMetrics.
 *
 * Serializes the registry to the Prometheus text exposition format and POSTs
 * it to the given URL (typically `<vm>/api/v1/import/prometheus`).
 *
 * @param register prom-client registry whose metrics are exported
 * @param options  target URL plus optional extra request headers (e.g. auth)
 * @throws Error when VictoriaMetrics responds with a non-2xx status
 */
export async function pushMetrics(
  register: Registry,
  options: {
    url: string // e.g. https://vm:8428/api/v1/import/prometheus
    headers?: Record<string, string>
  },
): Promise<void> {
  const payload = await register.metrics()
  // Caller-supplied headers win over the default Content-Type on key collision.
  const requestHeaders = {
    "Content-Type": register.contentType,
    ...options.headers,
  }
  const res = await fetch(options.url, {
    method: "POST",
    headers: requestHeaders,
    body: payload,
  })
  if (res.ok) return
  throw new Error(`VictoriaMetrics push failed ${res.status}: ${await res.text()}`)
}
/**
 * Push `register`'s metrics to VictoriaMetrics on a fixed schedule.
 *
 * Posts to `${vmUrl}/api/v1/import/prometheus` every `intervalMs` milliseconds.
 * Failed pushes are logged via console.error and never break the schedule.
 *
 * @param register   prom-client registry to export
 * @param vmUrl      base VictoriaMetrics URL, no trailing path (e.g. http://vm:8428)
 * @param intervalMs push period in milliseconds (default 15s)
 * @param headers    optional extra HTTP headers forwarded to each push
 *                   (new optional parameter — backward compatible)
 * @returns a stop function that cancels the schedule
 */
export function startMetricsPush(
  register: Registry,
  vmUrl: string,
  intervalMs = 15_000,
  headers?: Record<string, string>,
): () => void {
  const url = `${vmUrl}/api/v1/import/prometheus`
  const timer = setInterval(() => {
    // Fire-and-forget: a failed push must not throw out of the timer callback.
    void pushMetrics(register, { url, headers }).catch(console.error)
  }, intervalMs)
  return () => clearInterval(timer)
}
VictoriaMetrics Query Client
// lib/victoriametrics/query.ts — MetricsQL query client
// Base URL of the VictoriaMetrics instance; defaults to a local single-node setup.
const VM_URL = process.env.VICTORIAMETRICS_URL ?? "http://localhost:8428"
/** One series from /api/v1/query: label set plus a single sample. */
export type InstantResult = {
  metric: Record<string, string>
  value: [number, string] // [timestamp, value]
}
/** One series from /api/v1/query_range: label set plus one sample per step. */
export type RangeResult = {
  metric: Record<string, string>
  values: Array<[number, string]> // each entry is [unix-seconds, value-as-string]
}
/**
 * Run a MetricsQL/PromQL instant query against /api/v1/query.
 *
 * @param query MetricsQL expression
 * @param time  optional evaluation timestamp (defaults to server "now")
 * @returns matching series, each carrying a single sample
 * @throws Error on HTTP failure or a non-"success" API response envelope
 */
export async function instantQuery(
  query: string,
  time?: Date,
): Promise<InstantResult[]> {
  const params = new URLSearchParams({ query })
  if (time) params.set("time", String(Math.floor(time.getTime() / 1000)))
  const res = await fetch(`${VM_URL}/api/v1/query?${params}`)
  // Include the HTTP status, consistent with pushMetrics' error messages.
  if (!res.ok) throw new Error(`VM query error ${res.status}: ${await res.text()}`)
  const body = await res.json()
  // The Prometheus-compatible API wraps results in {status, data}; reject
  // envelopes that report an error even when the HTTP status was 200.
  if (body.status !== "success") {
    throw new Error(`VM query error: ${body.error ?? "unknown"}`)
  }
  return body.data.result as InstantResult[]
}
/**
 * Run a MetricsQL/PromQL range query against /api/v1/query_range.
 *
 * @param query MetricsQL expression
 * @param start range start (inclusive)
 * @param end   range end (inclusive)
 * @param step  resolution step, e.g. "60s"
 * @returns matching series, each with one sample per step
 * @throws Error on HTTP failure or a non-"success" API response envelope
 */
export async function rangeQuery(
  query: string,
  start: Date,
  end: Date,
  step = "60s",
): Promise<RangeResult[]> {
  const params = new URLSearchParams({
    query,
    start: String(Math.floor(start.getTime() / 1000)),
    end: String(Math.floor(end.getTime() / 1000)),
    step,
  })
  const res = await fetch(`${VM_URL}/api/v1/query_range?${params}`)
  // Include the HTTP status for easier diagnosis (4xx bad query vs 5xx outage).
  if (!res.ok) throw new Error(`VM range query error ${res.status}: ${await res.text()}`)
  const body = await res.json()
  // Reject error envelopes even when the HTTP layer reported success.
  if (body.status !== "success") {
    throw new Error(`VM range query error: ${body.error ?? "unknown"}`)
  }
  return body.data.result as RangeResult[]
}
/** Escape backslashes and double quotes so a value is safe inside a label matcher. */
function escapeLabelValue(value: string): string {
  return value.replace(/\\/g, "\\\\").replace(/"/g, '\\"')
}
/**
 * Convenience: fraction of requests with 5xx status for `service` over the
 * last 5 minutes (0..1). Returns 0 when no series match.
 *
 * @param service value of the `service` label (escaped before interpolation —
 *                a raw `"` or `\` would otherwise break the query string)
 */
export async function getErrorRate(service: string): Promise<number> {
  const svc = escapeLabelValue(service)
  const results = await instantQuery(
    `sum(rate(http_requests_total{service="${svc}",status_code=~"5.."}[5m])) / sum(rate(http_requests_total{service="${svc}"}[5m]))`,
  )
  if (!results.length) return 0
  // A zero denominator yields "NaN"; parseFloat(...) || 0 maps that to 0.
  return parseFloat(results[0].value[1]) || 0
}
/** MetricsQL with() template — compute p99 latency per route */
export async function getLatencyPercentiles(
routes: string[],
): Promise<Record<string, { p50: number; p95: number; p99: number }>> {
const query = `
with (
latency = sum(rate(http_request_duration_seconds_bucket{route=~"${routes.join("|")}"}[5m])) by (le, route)
)
histogram_quantile(0.99, latency)
`.trim()
const p99Results = await instantQuery(query)
const p95Results = await instantQuery(query.replace("0.99", "0.95"))
const p50Results = await instantQuery(query.replace("0.99", "0.50"))
const result: Record<string, { p50: number; p95: number; p99: number }> = {}
for (const r of p99Results) {
const route = r.metric.route
result[route] = {
p50: parseFloat(p50Results.find((x) => x.metric.route === route)?.value[1] ?? "0"),
p95: parseFloat(p95Results.find((x) => x.metric.route === route)?.value[1] ?? "0"),
p99: parseFloat(r.value[1]),
}
}
return result
}
vmagent Scrape Config
# vmagent.yml — lightweight Prometheus-compatible scraper forwarding to VictoriaMetrics
global:
  scrape_interval: 15s
  # Attached to every forwarded sample — identifies this scraper's origin.
  external_labels:
    cluster: production
    datacenter: us-east-1

scrape_configs:
  - job_name: my-app
    static_configs:
      - targets: ["app-1:3000", "app-2:3000", "app-3:3000"]
    metrics_path: /api/metrics
    # Bearer token read from file so it is not committed with this config.
    authorization:
      credentials_file: /etc/vmagent/metrics-token
  - job_name: node-exporters
    static_configs:
      - targets: ["host-1:9100", "host-2:9100"]
  - job_name: postgres
    static_configs:
      - targets: ["postgres-exporter:9187"]
  # Kubernetes pod discovery
  - job_name: k8s-pods
    kubernetes_sd_configs:
      - role: pod
        namespaces: { names: ["production"] }
    relabel_configs:
      # Only keep pods that opt in via the prometheus.io/scrape annotation.
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
      # Honor a custom prometheus.io/path annotation when present.
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)

# NOTE(review): vmagent configures remote write via -remoteWrite.* command-line
# flags, not via this file; a remote_write section inside -promscrape.config
# appears to be ignored by vmagent — confirm against the vmagent docs and, if
# so, move these settings to flags (-remoteWrite.url plus queue-related flags).
remote_write:
  - url: http://victoriametrics:8428/api/v1/write
    queue_config:
      max_samples_per_send: 10000
      capacity: 50000
# For cluster mode:
# url: http://vminsert:8480/insert/0/prometheus/api/v1/write
VictoriaMetrics Docker Compose
# docker-compose.yml — VictoriaMetrics + Grafana + vmagent stack
services:
  victoriametrics:
    image: victoriametrics/victoria-metrics:v1.101.0
    command:
      - -storageDataPath=/vm-data
      # A bare number is interpreted as months: 6 == 6 months of retention.
      - -retentionPeriod=6
      # NOTE(review): -downsampling.period is an enterprise-only flag — confirm
      # this image/license supports it, otherwise startup will fail.
      - -downsampling.period=30d:1m,6m:5m
      - -search.maxQueryDuration=30s
      - -httpListenAddr=:8428
    volumes:
      - vm-data:/vm-data
    ports:
      - "8428:8428"
  vmagent:
    image: victoriametrics/vmagent:v1.101.0
    command:
      - -promscrape.config=/etc/vmagent/vmagent.yml
      - -remoteWrite.url=http://victoriametrics:8428/api/v1/write
      # Buffer samples to disk while VictoriaMetrics is unreachable, capped at 1GB.
      - -remoteWrite.tmpDataPath=/vmagent-data
      - -remoteWrite.maxDiskUsagePerURL=1GB
    volumes:
      - ./vmagent.yml:/etc/vmagent/vmagent.yml:ro
      - vmagent-data:/vmagent-data
  grafana:
    image: grafana/grafana:10.4.0
    environment:
      # NOTE(review): default admin password — override for any real deployment.
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana-data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    ports:
      # Host port 3001 avoids clashing with a local app on 3000.
      - "3001:3000"
  victorialogs:
    image: victoriametrics/victoria-logs:v0.28.0-victorialogs
    command:
      - -storageDataPath=/vl-data
      - -retentionPeriod=30d
    volumes:
      - vl-data:/vl-data
    ports:
      - "9428:9428"
volumes:
  vm-data:
  vmagent-data:
  grafana-data:
  vl-data:
For the Prometheus alternative when needing the de-facto standard pull-based metrics system with the richest ecosystem of exporters, Alertmanager integration, operator-managed Kubernetes deployment (kube-prometheus-stack), and maximum community support — Prometheus is the incumbent while VictoriaMetrics provides 10× better compression, faster ingestion, lower RAM usage, and longer retention for the same hardware, making it a strong drop-in replacement at scale. For the InfluxDB alternative when needing purpose-built IoT time series storage with the line protocol for high-cardinality sensor streams, native SQL queries in v3, and multi-tenancy with organizations and buckets — InfluxDB is optimized for event-driven time series (IoT, APM) while VictoriaMetrics is Prometheus-native and excels at infrastructure metrics with PromQL/MetricsQL workloads. The Claude Skills 360 bundle includes VictoriaMetrics skill sets covering remote write, MetricsQL queries, and vmagent configs. Start with the free tier to try high-performance metrics storage.