Gradio builds ML demos in Python. pip install gradio. Simple interface: import gradio as gr, gr.Interface(fn=predict, inputs="text", outputs="text").launch(). Auto-detects types from hints: inputs=["image","text"] → Image + Textbox. Components: gr.Textbox(label="Prompt", lines=3, placeholder="Enter text..."), gr.Image(type="pil"), gr.Audio(type="numpy"), gr.Slider(0, 1, value=0.7, label="Temperature"), gr.Dropdown(["llama3","gpt4"], label="Model"). Blocks for complex layouts: with gr.Blocks() as demo: with gr.Row(): img_in=gr.Image(); img_out=gr.Image(). btn=gr.Button("Generate"). btn.click(fn=process, inputs=img_in, outputs=img_out). ChatInterface: gr.ChatInterface(fn=chat_fn, type="messages") — chat_fn(message, history) takes list of {"role","content"}. Streaming: def stream(prompt):\n for chunk in llm.stream(prompt): yield chunk. outputs=gr.Textbox() with fn=stream. State: state = gr.State([]) — persists across interactions for one session. Tabs: with gr.Tabs(): with gr.TabItem("Generate"): .... Examples: gr.Examples(examples=[["a cat","realistic"]], inputs=[prompt,style]). Progress: gr.Progress(track_tqdm=True) as a default parameter of fn, or progress(0.5, desc="Loading") inside it. Auth: demo.launch(auth=("user","pass")). Share: demo.launch(share=True) creates public URL. HuggingFace Spaces: push app.py + requirements.txt. Gradio client: from gradio_client import Client; result = Client("user/space-name").predict("hello", api_name="/predict"). Claude Code generates Gradio demos for image classification, text generation, audio processing, chatbots, and multi-model comparison UIs.
CLAUDE.md for Gradio
## Gradio Stack
- Version: gradio >= 4.40
- Simple: gr.Interface(fn, inputs, outputs).launch() — zero boilerplate
- Complex: gr.Blocks() context manager with Row/Column/Tab layout
- Chat: gr.ChatInterface(fn, type="messages") — fn(message, history) → str or generator
- Streaming: fn as generator with yield chunks → gr.Textbox/gr.Audio outputs
- State: gr.State(initial_value) — session-persistent, passed as fn arg
- Launch: .launch(server_port=7860, share=True, auth=("user","pass"))
- HF Spaces: push app.py + requirements.txt to HuggingFace Hub
Gradio Application
# app/gradio_app.py — multi-feature Gradio demo with chat, image, and audio
from __future__ import annotations
import time
from typing import Generator
import gradio as gr
# ── Dummy model functions (replace with real models) ──────────────────────────
def classify_image(image) -> dict[str, float]:
    """Return dummy label probabilities for *image* (stand-in for a real model)."""
    # Simulated inference latency so the UI shows a realistic loading state.
    time.sleep(0.5)
    predictions = {"cat": 0.85, "dog": 0.10, "bird": 0.05}
    return predictions
def generate_text(
    prompt: str,
    temperature: float,
    max_tokens: int,
) -> Generator[str, None, None]:
    """Stream a dummy completion one word at a time.

    Args:
        prompt: User prompt, echoed back in the fake response.
        temperature: Sampling temperature, shown in the output for demo purposes.
        max_tokens: Upper bound on the number of words streamed. Previously this
            parameter was accepted (and wired to a UI input) but ignored; it is
            now enforced so the "Max Tokens" control has a visible effect.

    Yields:
        The partial response text after each new word is appended.
    """
    words = f"Generated response for: '{prompt}' (temp={temperature:.1f})".split()
    # int() guards against gr.Number delivering a float; max(..., 0) guards
    # against negative input producing an empty range silently going wrong.
    limit = min(len(words), max(int(max_tokens), 0))
    for i in range(limit):
        time.sleep(0.05)  # Simulate per-token latency
        yield " ".join(words[: i + 1])
def chat_response(
    message: str,
    history: list[dict],
    system_prompt: str,
) -> Generator[str, None, None]:
    """Stream an echo reply character by character (stand-in for a real LLM).

    Yields the growing partial reply after each character so streaming chat
    UIs have something to render incrementally.
    """
    reply = f"[{system_prompt}] Echo: {message}"
    partial = ""
    for ch in reply:
        time.sleep(0.01)  # Simulate token-by-token generation
        partial += ch
        yield partial
def transcribe_audio(audio) -> str:
    """Fake ASR: report the clip duration instead of real transcription.

    *audio* is Gradio's numpy audio format, a ``(sample_rate, samples)`` pair,
    or None when the component is empty.
    """
    if audio is None:
        return ""
    rate, samples = audio
    seconds = len(samples) / rate
    return f"[Transcribed {seconds:.1f}s of audio — plug in real Whisper here]"
def process_image(image, operation: str) -> "Image.Image | None":
    """Apply a simple PIL transform to *image*.

    Args:
        image: A PIL image, or None (e.g. the component was cleared).
        operation: One of "Grayscale", "Blur", "Flip", "Rotate 90".
            Unrecognized names return the image unchanged.

    Returns:
        The transformed PIL image, or None when no image was provided.
    """
    # Guard before importing PIL so a cleared input never touches the
    # dependency; also drop the previously unused `Image` import.
    if image is None:
        return None
    from PIL import ImageFilter, ImageOps
    ops = {
        "Grayscale": lambda img: img.convert("L").convert("RGB"),
        "Blur": lambda img: img.filter(ImageFilter.GaussianBlur(5)),
        "Flip": lambda img: ImageOps.mirror(img),
        "Rotate 90": lambda img: img.rotate(90, expand=True),
    }
    # Unknown operation name falls back to identity.
    return ops.get(operation, lambda x: x)(image)
# ── Main Gradio app ────────────────────────────────────────────────────────────
def build_app() -> gr.Blocks:
    """Assemble the five-tab demo UI and return the (unlaunched) Blocks app.

    Tabs: image classification, streaming text generation, a streaming
    chatbot, audio transcription, and image processing. All model calls are
    the dummy functions defined above.
    """
    theme = gr.themes.Soft(primary_hue="violet", secondary_hue="blue")
    with gr.Blocks(
        title="Claude Code ML Demo",
        theme=theme,
        css=".gradio-container {max-width: 900px; margin: auto}",
    ) as demo:
        gr.Markdown("# Claude Code ML Demo")
        gr.Markdown("Explore image classification, text generation, and chatbot capabilities.")
        with gr.Tabs():
            # ── Tab 1: Image Classification ──────────────────────────────────
            with gr.TabItem("Image Classification"):
                gr.Markdown("Upload an image to classify it.")
                with gr.Row():
                    with gr.Column(scale=1):
                        img_input = gr.Image(
                            label="Input Image",
                            type="pil",
                            height=300,
                        )
                        classify_btn = gr.Button("Classify", variant="primary")
                    with gr.Column(scale=1):
                        # gr.Label renders the dict[str, float] from
                        # classify_image as a ranked bar chart.
                        label_output = gr.Label(
                            label="Predictions",
                            num_top_classes=3,
                        )
                # NOTE(review): assumes an examples/ directory ships next to
                # the app — confirm before deploying.
                gr.Examples(
                    examples=[["examples/cat.jpg"], ["examples/dog.jpg"]],
                    inputs=[img_input],
                )
                classify_btn.click(classify_image, inputs=img_input, outputs=label_output)
            # ── Tab 2: Text Generation with Streaming ─────────────────────────
            with gr.TabItem("Text Generation"):
                gr.Markdown("Stream text generation with configurable parameters.")
                with gr.Row():
                    with gr.Column(scale=2):
                        prompt_input = gr.Textbox(
                            label="Prompt",
                            lines=3,
                            placeholder="Enter your prompt...",
                        )
                        with gr.Row():
                            temp_slider = gr.Slider(0.0, 1.0, value=0.7, label="Temperature", step=0.05)
                            tokens_input = gr.Number(value=200, label="Max Tokens", precision=0)
                    with gr.Column(scale=2):
                        gen_output = gr.Textbox(
                            label="Generated Text",
                            lines=8,
                            interactive=False,
                        )
                gen_btn = gr.Button("Generate", variant="primary")
                clear_btn = gr.ClearButton([prompt_input, gen_output])
                # generate_text is a generator, so Gradio streams each yielded
                # partial string into gen_output.
                gen_btn.click(
                    generate_text,
                    inputs=[prompt_input, temp_slider, tokens_input],
                    outputs=gen_output,
                )
            # ── Tab 3: Chatbot ─────────────────────────────────────────────────
            with gr.TabItem("Chatbot"):
                system_prompt = gr.Textbox(
                    label="System Prompt",
                    value="You are a helpful assistant.",
                    lines=2,
                )
                # type="messages": history is a list of {"role", "content"} dicts.
                chatbot = gr.Chatbot(type="messages", height=400)
                msg_box = gr.Textbox(label="Message", placeholder="Type a message...", lines=1)
                with gr.Row():
                    send_btn = gr.Button("Send", variant="primary")
                    # Rebinding clear_btn is harmless: the Tab-2 ClearButton is
                    # already wired and no longer referenced by name.
                    clear_btn = gr.ClearButton([msg_box, chatbot])
                def user_turn(message, history):
                    # Append the user's message and clear the textbox ("" output).
                    history.append({"role": "user", "content": message})
                    return "", history
                def bot_turn(history, system):
                    # Stream the assistant reply: append an empty placeholder
                    # message, then mutate its content chunk by chunk, yielding
                    # the whole history so the Chatbot re-renders each time.
                    response = ""
                    history.append({"role": "assistant", "content": ""})
                    # history[-2] is the user message user_turn just appended;
                    # history[:-1] excludes the placeholder being filled.
                    for chunk in chat_response(history[-2]["content"], history[:-1], system):
                        response = chunk
                        history[-1]["content"] = response
                        yield history
                # Two-step chain (Enter key and Send button identically):
                # user_turn commits the user message, then bot_turn streams.
                msg_box.submit(user_turn, [msg_box, chatbot], [msg_box, chatbot]).then(
                    bot_turn, [chatbot, system_prompt], chatbot
                )
                send_btn.click(user_turn, [msg_box, chatbot], [msg_box, chatbot]).then(
                    bot_turn, [chatbot, system_prompt], chatbot
                )
            # ── Tab 4: Audio Transcription ────────────────────────────────────
            with gr.TabItem("Audio"):
                gr.Markdown("Record or upload audio for transcription.")
                audio_input = gr.Audio(label="Audio Input", sources=["microphone", "upload"])
                transcript = gr.Textbox(label="Transcript", interactive=False, lines=4)
                # .change fires on record-stop, upload, and clear (clear passes
                # None, which transcribe_audio maps to "").
                audio_input.change(transcribe_audio, inputs=audio_input, outputs=transcript)
            # ── Tab 5: Image Processing ───────────────────────────────────────
            with gr.TabItem("Image Processing"):
                with gr.Row():
                    proc_in = gr.Image(label="Input", type="pil", height=300)
                    proc_out = gr.Image(label="Output", type="pil", height=300)
                operation = gr.Radio(
                    ["Grayscale", "Blur", "Flip", "Rotate 90"],
                    label="Operation", value="Grayscale",
                )
                proc_btn = gr.Button("Process")
                # Two triggers, same handler: explicit button press and
                # auto-process whenever a new image is supplied.
                proc_btn.click(process_image, inputs=[proc_in, operation], outputs=proc_out)
                proc_in.change(process_image, inputs=[proc_in, operation], outputs=proc_out)
    return demo
if __name__ == "__main__":
    # Bind on all interfaces (container-friendly); keep the demo private
    # (share=False — no public gradio.live URL).
    demo = build_app()
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
Choose the Streamlit alternative when building data-analytics dashboards with rich pandas dataframe rendering, Altair/Plotly charts, and session-state-driven multi-page apps that feel more like web applications — Streamlit's re-run-on-interaction model works well for dashboards, while Gradio's component-based event wiring and streaming ChatInterface are purpose-built for ML demo patterns (image classification, side-by-side model comparison, chatbots) where the interaction model is trigger-response rather than state mutation. When prototyping directly in the HuggingFace ecosystem, both Gradio and Streamlit deploy to Spaces, but Gradio's gr.ChatInterface and the gradio_client SDK for programmatic prediction calls are native to HuggingFace and require zero iframe embedding to call from other applications. The Claude Skills 360 bundle includes Gradio skill sets covering Blocks layouts, streaming text and audio, ChatInterface chatbots, image classifiers, progress bars, and HuggingFace Spaces deployment. Start with the free tier to try ML demo UI generation.