Groq provides ultra-fast LLM inference with an OpenAI-compatible API — new Groq({ apiKey: process.env.GROQ_API_KEY }) initializes the client. groq.chat.completions.create({ model: "llama-3.3-70b-versatile", messages }) generates completions. stream: true enables streaming with for await (const chunk of completion). Tool calling: tools: [{ type: "function", function: { name, description, parameters } }] with tool_choice: "auto". JSON mode: response_format: { type: "json_object" } with a system prompt instructing JSON output. Audio: groq.audio.transcriptions.create({ file, model: "whisper-large-v3", language: "en" }) transcribes audio. Models: llama-3.3-70b-versatile (balanced), llama-3.1-8b-instant (fastest), mixtral-8x7b-32768 (long context), gemma2-9b-it (efficient). Rate limits: 30 req/min free tier. Groq delivers sub-100ms first-token latency. Claude Code generates Groq chat completions, tool-using agents, streaming APIs, and audio transcription.
CLAUDE.md for Groq
## Groq Stack
- Version: groq-sdk >= 0.9
- Init: const groq = new Groq({ apiKey: process.env.GROQ_API_KEY })
- Chat: const response = await groq.chat.completions.create({ model: "llama-3.3-70b-versatile", messages: [{ role: "user", content: prompt }], temperature: 0.7, max_tokens: 1024 })
- Answer: response.choices[0].message.content
- Stream: const stream = await groq.chat.completions.create({ ..., stream: true }); for await (const chunk of stream) process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
- JSON: model: "llama-3.3-70b-versatile", response_format: { type: "json_object" } + system: "Respond only with valid JSON"
- Audio: await groq.audio.transcriptions.create({ file: fs.createReadStream(path), model: "whisper-large-v3" })
Groq Client
// lib/groq/client.ts — Groq SDK with helpers
import Groq from "groq-sdk"
import type { ChatCompletionMessageParam, ChatCompletionTool } from "groq-sdk/resources/chat"
// Module-level singleton client — reads GROQ_API_KEY from the environment.
// NOTE(review): no explicit guard here; presumably the SDK errors at request time if the key is unset — confirm.
const groq = new Groq({ apiKey: process.env.GROQ_API_KEY })
// Available models — choose based on speed vs. capability tradeoff.
// Latency/throughput figures in the comments are the author's estimates — verify against Groq's current model docs.
export const MODELS = {
FAST: "llama-3.1-8b-instant", // ~50ms TTFT, 750k tokens/min (estimate)
BALANCED: "llama-3.3-70b-versatile", // ~80ms TTFT, high quality (estimate)
LONG: "mixtral-8x7b-32768", // 32k context window
EFFICIENT: "gemma2-9b-it", // Good quality, very fast
} as const
// Union of the model-ID string literals above.
export type GroqModel = (typeof MODELS)[keyof typeof MODELS]
/** Shared options for the completion helpers in this module. */
export interface CompletionOptions {
/** Target model; each helper supplies its own default (usually MODELS.BALANCED). */
model?: GroqModel
/** Optional system message prepended to the conversation. */
systemPrompt?: string
/** Sampling temperature; helpers default to 0.7. */
temperature?: number
/** Completion token cap (`max_tokens`); helper defaults vary (1024 or 2048). */
maxTokens?: number
/** Function-calling tool definitions; when set, helpers pass tool_choice: "auto". */
tools?: ChatCompletionTool[]
/** When true, requests response_format json_object (used by complete/generateJSON). */
jsonMode?: boolean
}
/**
 * Single-turn completion — returns the assistant's text content.
 *
 * @param prompt  User message text.
 * @param options Model/sampling overrides; `jsonMode` enables Groq's
 *                `json_object` response format.
 * @returns Assistant message content, or "" when the model produced no text
 *          (e.g. it responded with a tool call instead — callers needing
 *          tool calls should use `chat`).
 */
export async function complete(
  prompt: string,
  options: CompletionOptions = {},
): Promise<string> {
  const {
    model = MODELS.BALANCED,
    systemPrompt,
    temperature = 0.7,
    maxTokens = 1024,
    tools,
    jsonMode = false,
  } = options
  const messages: ChatCompletionMessageParam[] = []
  if (systemPrompt) messages.push({ role: "system", content: systemPrompt })
  // json_object mode requires an instruction to emit JSON; inject one only when
  // the caller supplied no system prompt of their own.
  // NOTE(review): a caller-supplied systemPrompt combined with jsonMode should
  // itself mention JSON — verify against Groq's JSON-mode requirements.
  if (jsonMode && !systemPrompt) messages.push({ role: "system", content: "Respond only with valid JSON." })
  messages.push({ role: "user", content: prompt })
  const response = await groq.chat.completions.create({
    model,
    messages,
    temperature,
    max_tokens: maxTokens,
    ...(jsonMode ? { response_format: { type: "json_object" } } : {}),
    ...(tools ? { tools, tool_choice: "auto" } : {}),
  })
  // Guarded access: choices may be empty, and content is null for tool-call
  // responses — return "" rather than throwing on an index/null access.
  return response.choices[0]?.message?.content ?? ""
}
/**
 * Multi-turn chat over an existing conversation history.
 * Returns the assistant's reply text plus any tool calls it requested.
 */
export async function chat(
  messages: ChatCompletionMessageParam[],
  options: CompletionOptions = {},
): Promise<{ content: string; toolCalls?: Groq.Chat.Completions.ChatCompletionMessage.ToolCall[] }> {
  const { model = MODELS.BALANCED, temperature = 0.7, maxTokens = 2048, tools } = options
  const completion = await groq.chat.completions.create({
    model,
    messages,
    temperature,
    max_tokens: maxTokens,
    ...(tools ? { tools, tool_choice: "auto" } : {}),
  })
  const reply = completion.choices[0].message
  // Normalize the SDK message: null content → "", missing tool_calls → undefined.
  const content = reply.content ?? ""
  const toolCalls = reply.tool_calls ?? undefined
  return { content, toolCalls }
}
/**
 * Stream a completion, yielding each text delta as it arrives from Groq.
 * Non-text chunks (role markers, finish events) are skipped.
 */
export async function* stream(
  prompt: string,
  options: CompletionOptions = {},
): AsyncGenerator<string> {
  const { model = MODELS.BALANCED, systemPrompt, temperature = 0.7, maxTokens = 2048 } = options
  const history: ChatCompletionMessageParam[] = systemPrompt
    ? [{ role: "system", content: systemPrompt }, { role: "user", content: prompt }]
    : [{ role: "user", content: prompt }]
  const completion = await groq.chat.completions.create({
    model,
    messages: history,
    temperature,
    max_tokens: maxTokens,
    stream: true,
  })
  for await (const part of completion) {
    const text = part.choices[0]?.delta?.content
    if (text) yield text
  }
}
/**
 * JSON-mode completion parsed into the caller's expected type.
 * The cast to T is unchecked — callers should validate the shape
 * (e.g. with a zod schema) before trusting it.
 */
export async function generateJSON<T>(
  prompt: string,
  options: Omit<CompletionOptions, "jsonMode"> = {},
): Promise<T> {
  const raw = await complete(prompt, { ...options, jsonMode: true })
  const parsed: unknown = JSON.parse(raw)
  return parsed as T
}
/**
 * Whisper audio transcription via Groq.
 *
 * @param audioFile Audio payload (File or Blob).
 * @param language  Optional ISO-639-1 hint passed through to Whisper.
 * @returns Transcribed text plus the language reported by verbose_json output.
 */
export async function transcribeAudio(
  audioFile: File | Blob,
  language?: string,
): Promise<{ text: string; language?: string }> {
  const transcription = await groq.audio.transcriptions.create({
    // SDK's Uploadable type doesn't cover DOM File/Blob directly; the runtime
    // accepts them. NOTE(review): `as any` disables checking — confirm against
    // the SDK's Uploadable union and narrow if possible.
    file: audioFile as any,
    model: "whisper-large-v3",
    ...(language ? { language } : {}),
    response_format: "verbose_json",
  })
  // verbose_json returns fields (language, duration, segments, …) beyond the
  // SDK's Transcription type. Narrow once through a local shape instead of
  // scattering `as any` over each property access.
  const verbose = transcription as unknown as { text: string; language?: string }
  return { text: verbose.text, language: verbose.language }
}
export { groq }
Streaming API Route
// app/api/chat/route.ts — Next.js streaming chat with Groq
import { NextResponse } from "next/server"
import { z } from "zod"
import { groq, MODELS } from "@/lib/groq/client"
import type { ChatCompletionMessageParam } from "groq-sdk/resources/chat"
// Request-body schema — caps message count and lengths to bound prompt size
// (and therefore token usage) per request.
const ChatSchema = z.object({
messages: z.array(z.object({
role: z.enum(["user", "assistant"]),
content: z.string().max(8000),
})).max(50),
// Client-facing aliases; resolved to concrete model IDs via MODEL_MAP below.
model: z.enum(["fast", "balanced", "long"]).default("balanced"),
systemPrompt: z.string().max(2000).optional(),
})
// Alias → Groq model ID. Keep keys in sync with the `model` enum above.
const MODEL_MAP = {
fast: MODELS.FAST,
balanced: MODELS.BALANCED,
long: MODELS.LONG,
} as const
export async function POST(req: Request) {
const body = await req.json()
const { messages, model, systemPrompt } = ChatSchema.parse(body)
const groqMessages: ChatCompletionMessageParam[] = []
if (systemPrompt) groqMessages.push({ role: "system", content: systemPrompt })
groqMessages.push(...messages)
const stream = await groq.chat.completions.create({
model: MODEL_MAP[model],
messages: groqMessages,
temperature: 0.7,
max_tokens: 2048,
stream: true,
})
const encoder = new TextEncoder()
const readable = new ReadableStream({
async start(controller) {
try {
for await (const chunk of stream) {
const delta = chunk.choices[0]?.delta?.content
if (delta) {
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ content: delta })}\n\n`))
}
}
controller.enqueue(encoder.encode("data: [DONE]\n\n"))
} finally {
controller.close()
}
},
})
return new Response(readable, {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
},
})
}
For the OpenAI alternative when GPT-4o/o1 reasoning models, DALL-E image generation, function calling with the latest OpenAI features, or enterprise SLAs with OpenAI are required — OpenAI is the most capable but slower and more expensive while Groq provides 10-20x faster inference (tokens per second) at lower cost for Llama and Mixtral models, see the OpenAI guide. For the Anthropic Claude alternative when the highest-quality reasoning, the largest context window (200k tokens), or agentic coding capabilities are needed — Claude models (claude-sonnet-4-6) outperform open models on complex tasks while Groq’s open-model hosting is ideal for high-throughput, cost-sensitive applications, see the Anthropic SDK guide. The Claude Skills 360 bundle includes Groq skill sets covering streaming chat, tool calling, and audio transcription. Start with the free tier to try Groq’s fast inference for yourself.