Google Genkit is an open-source AI framework from Google's Firebase team — import { genkit } from "genkit" initializes the framework. const ai = genkit({ plugins: [googleAI({ apiKey })], model: gemini15Flash }) configures providers. ai.defineFlow({ name: "summarize", inputSchema, outputSchema }, async (input) => { const { text } = await ai.generate({ prompt: input.text, config: { temperature: 0.3 } }); return { summary: text } }) defines a typed flow. ai.definePrompt({ name: "classify", input: { schema } }, "Classify: {{input}}") creates a reusable prompt. ai.defineTool({ name: "search", description, inputSchema, outputSchema }, async ({ query }) => search(query)) adds function calling. ai.defineRetriever({ name: "docs" }, async (query) => await vectorSearch(query)) adds RAG. const result = await runFlow(myFlow, input) executes a flow. streamFlow(myFlow, input) streams output. Genkit's developer UI (genkit start) provides tracing and flow testing. Claude Code generates Genkit flows, RAG pipelines, tool-using agents, and typed AI functions.
CLAUDE.md for Genkit
## Genkit Stack
- Version: genkit >= 0.9, @genkit-ai/googleai >= 0.9
- Init: const ai = genkit({ plugins: [googleAI()], model: "googleai/gemini-1.5-flash" })
- Flow: const myFlow = ai.defineFlow({ name: "flowName", inputSchema: z.string(), outputSchema: z.string() }, async (input) => { const { text } = await ai.generate({ prompt: input }); return text })
- Run: await runFlow(myFlow, "input text")
- Stream: for await (const chunk of streamFlow(myFlow, input)) console.log(chunk)
- Prompt: const prompt = ai.definePrompt({ name: "name", input: { schema: z.object({...}) } }, "Template {{field}}")
- Tool: ai.defineTool({ name, description, inputSchema, outputSchema }, async (args) => result)
## Genkit Configuration
// lib/genkit/index.ts — Genkit setup with the Google AI plugin.
// Exposes one shared `ai` instance plus the model handles and `z`, so the
// rest of the app imports everything Genkit-related from this module.
import { genkit, z } from "genkit"
import { googleAI, gemini15Flash, gemini15Pro } from "@genkit-ai/googleai"

export const ai = genkit({
  plugins: [
    googleAI({
      // Passed explicitly for clarity; the plugin can also resolve the key
      // from its own environment-variable lookup when this is undefined.
      apiKey: process.env.GOOGLE_AI_API_KEY,
    }),
  ],
  // Default model for ai.generate() calls that do not specify one.
  model: gemini15Flash,
  // NOTE: the previous `enableTracingAndMetrics` option was removed — it
  // was a pre-0.9 `configureGenkit()` option and is not a valid `genkit()`
  // option in genkit >= 0.9 (the version this file targets). Traces are
  // collected automatically when running under `genkit start`.
})

export { gemini15Flash, gemini15Pro }
export { z }
## AI Flows
// lib/genkit/flows.ts — type-safe AI flows with tools and RAG
import { runFlow, streamFlow } from "genkit"
import { ai, z, gemini15Flash, gemini15Pro } from "./index"
// ── Schema definitions ─────────────────────────────────────────────────────
// Input contract for summarizeFlow: the text to condense plus knobs for
// target length and output format. Defaults keep simple callers terse.
const SummarizeInput = z.object({
  text: z.string().max(20000).min(50), // 50–20 000 characters
  length: z.enum(["short", "medium", "long"]).default("medium"),
  format: z.enum(["prose", "bullets"]).default("prose"),
})

// summarizeFlow result: the summary itself, extracted key points, and a
// word count computed from the summary text.
const SummarizeOutput = z.object({
  summary: z.string(),
  keyPoints: z.array(z.string()),
  wordCount: z.number(),
})

// Input contract for classifyFlow: the text plus the caller-supplied set
// of candidate labels to choose from.
const ClassifyInput = z.object({
  text: z.string().max(5000),
  categories: z.array(z.string()),
})

// classifyFlow result: the chosen label, a model-reported confidence in
// [0, 1], and a short justification string.
const ClassifyOutput = z.object({
  category: z.string(),
  confidence: z.number().max(1).min(0),
  reasoning: z.string(),
})
// ── Tools ──────────────────────────────────────────────────────────────────
// Tool: web search for current information. The implementation below is a
// placeholder — in production, call a real search API (Serper, Brave,
// Tavily, …).
const searchWebTool = ai.defineTool(
  {
    name: "searchWeb",
    description: "Search the web for current information about a topic",
    inputSchema: z.object({ query: z.string(), maxResults: z.number().default(5) }),
    outputSchema: z.object({
      results: z.array(z.object({ title: z.string(), snippet: z.string(), url: z.string() })),
    }),
  },
  async ({ query, maxResults }) => {
    // Fixed: the original built a single-element array and then sliced it,
    // so it could never return more than one result regardless of
    // `maxResults`. Generate one mock entry per requested result instead.
    return {
      results: Array.from({ length: Math.max(0, Math.floor(maxResults)) }, (_, i) => ({
        title: `Mock result ${i + 1} for: ${query}`,
        snippet: "Search result content...",
        url: `https://example.com/${i + 1}`,
      })),
    }
  },
)
// Tool: report the current UTC date/time so the model can timestamp its
// output without hallucinating a date.
const getCurrentDateTool = ai.defineTool(
  {
    name: "getCurrentDate",
    description: "Get the current date and time in UTC",
    inputSchema: z.object({}),
    outputSchema: z.object({ date: z.string(), timestamp: z.number() }),
  },
  async () => {
    // Sample the clock once: the original read it twice (`new Date()` and
    // `Date.now()`), so `date` and `timestamp` could refer to different
    // instants.
    const now = new Date()
    return { date: now.toISOString(), timestamp: now.getTime() }
  },
)
// ── Flows ──────────────────────────────────────────────────────────────────
/**
 * Typed flow: summarize `text` at the requested length/format and extract
 * 3-5 key points. Throws a descriptive Error if the model returns no
 * structured output, instead of dereferencing it with `!`.
 */
export const summarizeFlow = ai.defineFlow(
  {
    name: "summarize",
    inputSchema: SummarizeInput,
    outputSchema: SummarizeOutput,
  },
  async ({ text, length, format }) => {
    const wordTargets = { short: "50-80 words", medium: "100-150 words", long: "200-300 words" }
    const { output } = await ai.generate({
      model: gemini15Flash,
      prompt: [
        { text: `Summarize the following text in ${wordTargets[length]}.` },
        { text: `Format: ${format === "bullets" ? "Use bullet points for the summary." : "Write in flowing prose."}` },
        { text: `Also extract 3-5 key points as short phrases.\n\nText to summarize:\n${text}` },
      ],
      output: {
        // Structured output: only summary + keyPoints come from the model;
        // wordCount is derived locally below.
        schema: z.object({
          summary: z.string(),
          keyPoints: z.array(z.string()),
        }),
      },
      config: { temperature: 0.3 },
    })
    // `output` is nullish when the response fails schema extraction — fail
    // loudly rather than crash on a non-null assertion.
    if (!output) throw new Error("summarize flow: model returned no structured output")
    const summary = output.summary
    return {
      summary,
      keyPoints: output.keyPoints,
      // Trim before splitting so a whitespace-only summary counts as 0
      // words instead of 1 (''.split(/\s+/) yields ['']).
      wordCount: summary.trim() === "" ? 0 : summary.trim().split(/\s+/).length,
    }
  },
)
/**
 * Typed flow: classify `text` into one of the caller-supplied `categories`,
 * returning the chosen category, a confidence in [0, 1], and brief
 * reasoning. Throws if the model returns no structured output, instead of
 * dereferencing it with a non-null assertion.
 */
export const classifyFlow = ai.defineFlow(
  {
    name: "classify",
    inputSchema: ClassifyInput,
    outputSchema: ClassifyOutput,
  },
  async ({ text, categories }) => {
    const { output } = await ai.generate({
      model: gemini15Flash,
      prompt: `Classify the following text into one of these categories: ${categories.join(", ")}.
Text: "${text}"
Respond with JSON: { "category": "<chosen category>", "confidence": <0-1>, "reasoning": "<brief explanation>" }`,
      // Low temperature: classification should be near-deterministic.
      output: { schema: ClassifyOutput },
      config: { temperature: 0.1 },
    })
    // Guard instead of `output!`: surface schema-extraction failures as a
    // descriptive error rather than a TypeError at the call site.
    if (!output) throw new Error("classify flow: model returned no structured output")
    return output
  },
)
// Agentic research flow: gives gemini15Pro access to the web-search and
// current-date tools and asks it for a report on `topic` at the requested
// depth.
export const researchFlow = ai.defineFlow(
  {
    name: "research",
    inputSchema: z.object({ topic: z.string(), depth: z.enum(["brief", "detailed"]).default("brief") }),
    outputSchema: z.object({ report: z.string(), sources: z.array(z.string()) }),
  },
  async ({ topic, depth }) => {
    const detailed = depth === "detailed"
    // Build the prompt up front; the model decides when to invoke tools.
    const prompt = `Research the following topic ${detailed ? "in depth" : "briefly"}: ${topic}
Use the searchWeb tool to find current information and the getCurrentDate tool to note when this was researched.
Write a ${detailed ? "comprehensive" : "concise"} report based on your findings.`
    const response = await ai.generate({
      model: gemini15Pro,
      tools: [searchWebTool, getCurrentDateTool],
      prompt,
      config: { temperature: 0.4 },
    })
    // Placeholder sources entry — the real URLs live in the tool-call trace.
    return { report: response.text, sources: ["Web search results (see tool calls)"] }
  },
)
// ── Re-exports for callers ─────────────────────────────────────────────────
export { runFlow, streamFlow }
## Next.js Server Action Integration
// app/actions/ai.ts — Genkit flows as Next.js server actions
"use server"
import { runFlow, streamFlow } from "@/lib/genkit/flows"
import { summarizeFlow, classifyFlow, researchFlow } from "@/lib/genkit/flows"
import { z } from "zod"
import { createStreamableValue } from "ai/rsc"
/**
 * Server action: summarize `text` in prose at the requested length
 * (defaults to "medium" when no option is given).
 */
export async function summarizeAction(text: string, options?: { length?: "short" | "medium" | "long" }) {
  return runFlow(summarizeFlow, {
    text,
    length: options?.length ?? "medium",
    format: "prose",
  })
}
/** Server action: classify `text` into one of the given `categories`. */
export async function classifyAction(text: string, categories: string[]) {
  const result = await runFlow(classifyFlow, { text, categories })
  return result
}
/**
 * Server action: stream a brief research report on `topic` to the client
 * as an RSC streamable value read incrementally on the client side.
 */
export async function streamResearchAction(topic: string) {
  const stream = createStreamableValue("")
  // Fire-and-forget pump, `void`-ed to signal intent. The original had no
  // error handling: a rejection inside streamFlow left the stream open
  // forever and surfaced as an unhandled promise rejection. Propagate
  // failures to the client via stream.error() instead.
  void (async () => {
    try {
      for await (const chunk of streamFlow(researchFlow, { topic, depth: "brief" })) {
        stream.update(chunk)
      }
      stream.done()
    } catch (e: unknown) {
      stream.error(e instanceof Error ? e.message : String(e))
    }
  })()
  return { output: stream.value }
}
Choose the Vercel AI SDK instead when building primarily Next.js applications with streaming UI components, useChat/useCompletion hooks, and a simpler single-provider focus — the Vercel AI SDK is tightly integrated with Next.js streaming, while Genkit provides multi-step typed flows, built-in developer-UI tracing, and Firebase/Google Cloud integration; see the Vercel AI SDK guide. Choose LangChain.js instead when you need a more comprehensive chain/agent/memory ecosystem with Python parity, a vast library of document loaders, and extensive third-party integrations — LangChain.js has the broadest ecosystem, while Genkit offers tighter TypeScript safety and better Firebase integration; see the LangChain guide. The Claude Skills 360 bundle includes Genkit skill sets covering typed flows, tools, RAG, and streaming. Start with the free tier to try Firebase AI generation.