CTranslate2 is a fast inference engine for encoder-decoder and decoder-only Transformer models. Install with `pip install ctranslate2`. Convert a model from the CLI: `ct2-transformers-converter --model Helsinki-NLP/opus-mt-en-fr --output_dir opus-mt-en-fr-ct2 --quantization int8`. Convert from Python: `ctranslate2.converters.TransformersConverter("Helsinki-NLP/opus-mt-en-fr").convert("./opus-mt-ct2", quantization="int8")` (OpusMTConverter is for native OPUS-MT/Marian model directories, not Hub IDs). Translate: `translator = ctranslate2.Translator("./opus-mt-ct2", device="cpu", inter_threads=4)`, `results = translator.translate_batch([["Hello", "world"]])`, `output = results[0].hypotheses[0]`. Tokenize: `from transformers import AutoTokenizer`, `tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")`, `tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(text))`. Generator: `generator = ctranslate2.Generator("./llama-ct2", device="cuda")`, `results = generator.generate_batch([tokens], max_length=256, beam_size=1, sampling_temperature=0.7)`. Encoder: `encoder = ctranslate2.Encoder("./bert-ct2", device="cpu")`, `output = encoder.forward_batch([tokens])`, `hidden = output.last_hidden_state` (per-token states; pool them yourself). `compute_type="auto"` selects the best supported precision; `compute_type="int8_float16"` mixes int8 weights with float16 compute on GPU. Use `ctranslate2.converters.TransformersConverter("model_id")` for Hugging Face models. Async: pass `asynchronous=True` to `translate_batch`/`generate_batch` for non-blocking calls. Faster-whisper: `from faster_whisper import WhisperModel`, `model = WhisperModel("large-v3", device="cpu", compute_type="int8")`, `segments, info = model.transcribe("audio.mp3", beam_size=5)`. Claude Code generates CTranslate2 conversion scripts, Translator/Generator pipelines, batched inference, and faster-whisper transcription code.
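A minimal end-to-end quickstart, assuming the converted model directory ./opus-mt-ct2 produced by the conversion command above:

# quickstart.py — tokenize → translate → detokenize with a converted model
import ctranslate2
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
translator = ctranslate2.Translator("./opus-mt-ct2", device="cpu")

tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode("Hello, world!"))
results = translator.translate_batch([tokens])
ids = tokenizer.convert_tokens_to_ids(results[0].hypotheses[0])
print(tokenizer.decode(ids, skip_special_tokens=True))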
## CLAUDE.md for CTranslate2
## CTranslate2 Stack
- Version: ctranslate2 >= 4.0
- Convert: TransformersConverter(model_id) for HF models | OpusMTConverter(model_dir) for native OPUS-MT → .convert(output_dir, quantization="int8")
- Translate: Translator(model_path, device, inter_threads) → translate_batch([[tokens]])
- Generate: Generator(model_path, device) → generate_batch([tokens], max_length, beam_size)
- Encode: Encoder(model_path, device) → forward_batch([tokens]) → .last_hidden_state
- Quantize: compute_type="int8" | "int8_float16" | "float16" | "bfloat16" | "auto"
- Async: translate_batch / generate_batch with asynchronous=True → async results expose .done() / .result() (see the sketch after this list)
- Whisper: faster_whisper.WhisperModel(size, device, compute_type) → transcribe(audio, beam_size)
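The async bullet maps to a flag rather than a separate class; a minimal sketch, assuming a converted model at ./opus-mt-ct2:

# async_sketch.py — non-blocking translation via asynchronous=True
import ctranslate2
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
translator = ctranslate2.Translator("./opus-mt-ct2", device="cpu", inter_threads=4)

def to_tokens(text: str) -> list[str]:
    return tokenizer.convert_ids_to_tokens(tokenizer.encode(text))

# Each call returns immediately; the inter_threads workers process batches in parallel.
pending = [
    translator.translate_batch([to_tokens(text)], asynchronous=True)
    for text in ["Hello, world!", "Good morning."]
]
for batch in pending:
    for async_result in batch:  # .done() polls without blocking, .result() blocks
        print(async_result.result().hypotheses[0])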
## CTranslate2 Inference Pipeline
# inference/ctranslate2_pipeline.py — fast transformer inference with CTranslate2
from __future__ import annotations
import os
import time
from pathlib import Path
from typing import Optional
import ctranslate2
import numpy as np
# ── 1. Model conversion ───────────────────────────────────────────────────────
def convert_translation_model(
hf_model_id: str = "Helsinki-NLP/opus-mt-en-fr",
output_dir: str = "./opus-mt-ct2",
    quantization: str | None = "int8",  # int8 | int16 | float16 | bfloat16 | None (keep original precision)
force: bool = False,
) -> str:
"""
Convert a Hugging Face seq2seq model to CTranslate2 format.
Quantization reduces model size by ~4x (int8) with negligible quality loss.
"""
output_path = Path(output_dir)
if output_path.exists() and not force:
print(f"Model already converted at {output_dir}")
return output_dir
print(f"Converting {hf_model_id} → {output_dir} (quantization={quantization})...")
    # TransformersConverter accepts a Hub model ID; OpusMTConverter expects a
    # native OPUS-MT (Marian) model directory instead.
    converter = ctranslate2.converters.TransformersConverter(hf_model_id)
converter.convert(output_dir, quantization=quantization, force=force)
print(f"Conversion complete: {output_path}")
return output_dir
def convert_causal_model(
hf_model_id: str = "meta-llama/Llama-3.2-1B",
output_dir: str = "./llama-ct2",
quantization: str = "int8_float16",
) -> str:
"""Convert a causal LM from Hugging Face to CTranslate2 Generator format."""
output_path = Path(output_dir)
if output_path.exists():
print(f"Model already at {output_dir}")
return output_dir
converter = ctranslate2.converters.TransformersConverter(
hf_model_id,
low_cpu_mem_usage=True,
)
converter.convert(output_dir, quantization=quantization, force=True)
return output_dir
def convert_encoder_model(
hf_model_id: str = "bert-base-uncased",
output_dir: str = "./bert-ct2",
quantization: str = "int8",
) -> str:
"""Convert a BERT-style encoder for feature extraction."""
converter = ctranslate2.converters.TransformersConverter(hf_model_id)
converter.convert(output_dir, quantization=quantization, force=True)
return output_dir
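# Usage sketch (hypothetical helper): verify a conversion before serving it.
# The CLI equivalent of convert_translation_model() is:
#   ct2-transformers-converter --model Helsinki-NLP/opus-mt-en-fr \
#       --output_dir ./opus-mt-ct2 --quantization int8
def verify_conversion(model_dir: str = "./opus-mt-ct2") -> None:
    """Raise if the directory does not contain a CTranslate2 model."""
    if not ctranslate2.contains_model(model_dir):
        raise FileNotFoundError(f"No CTranslate2 model found in {model_dir}")
    print(f"CTranslate2 model ready at {model_dir}")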
# ── 2. Translation pipeline ───────────────────────────────────────────────────
class TranslationPipeline:
"""
High-throughput translation with CTranslate2 Translator.
Supports batching, beam search, and multi-threaded CPU inference.
"""
def __init__(
self,
model_path: str,
src_tokenizer: str, # HuggingFace tokenizer name or path
        tgt_tokenizer: Optional[str] = None,  # defaults to src_tokenizer
device: str = "cpu", # "cpu" | "cuda" | "auto"
inter_threads: int = 4, # Parallel request threads
intra_threads: int = 0, # OpenMP threads per request (0=auto)
compute_type: str = "auto",
):
from transformers import AutoTokenizer
self.src_tokenizer = AutoTokenizer.from_pretrained(src_tokenizer)
self.tgt_tokenizer = AutoTokenizer.from_pretrained(
tgt_tokenizer or src_tokenizer
)
self.translator = ctranslate2.Translator(
model_path,
device=device,
inter_threads=inter_threads,
intra_threads=intra_threads,
compute_type=compute_type,
)
def _tokenize(self, text: str) -> list[str]:
ids = self.src_tokenizer.encode(text, add_special_tokens=True)
tokens = self.src_tokenizer.convert_ids_to_tokens(ids)
return tokens
def _detokenize(self, tokens: list[str]) -> str:
ids = self.tgt_tokenizer.convert_tokens_to_ids(tokens)
return self.tgt_tokenizer.decode(ids, skip_special_tokens=True)
def translate(
self,
text: str,
beam_size: int = 4,
max_input_len: int = 512,
max_decoding_len: int = 512,
) -> str:
"""Translate a single text."""
source_tokens = [self._tokenize(text)]
results = self.translator.translate_batch(
source_tokens,
beam_size=beam_size,
max_input_length=max_input_len,
max_decoding_length=max_decoding_len,
)
return self._detokenize(results[0].hypotheses[0])
def translate_batch(
self,
texts: list[str],
beam_size: int = 4,
batch_size: int = 32,
) -> list[str]:
"""Translate a batch of texts."""
all_tokens = [self._tokenize(t) for t in texts]
all_results = []
for i in range(0, len(all_tokens), batch_size):
batch = all_tokens[i : i + batch_size]
results = self.translator.translate_batch(
batch,
beam_size=beam_size,
max_batch_size=batch_size,
)
all_results.extend(
self._detokenize(r.hypotheses[0]) for r in results
)
return all_results
def translate_with_scores(
self,
texts: list[str],
num_hypotheses: int = 3,
beam_size: int = 5,
) -> list[list[tuple[str, float]]]:
"""Return top-N translations with log-probability scores."""
all_tokens = [self._tokenize(t) for t in texts]
results = self.translator.translate_batch(
all_tokens,
beam_size=beam_size,
num_hypotheses=num_hypotheses,
return_scores=True,
)
output = []
for result in results:
hyps = [
(self._detokenize(h), s)
for h, s in zip(result.hypotheses, result.scores)
]
output.append(hyps)
return output
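# Usage sketch (hypothetical demo, assuming ./opus-mt-ct2 from convert_translation_model):
def demo_translation() -> None:
    pipeline = TranslationPipeline(
        model_path="./opus-mt-ct2",
        src_tokenizer="Helsinki-NLP/opus-mt-en-fr",
    )
    print(pipeline.translate("Hello, world!"))
    for line in pipeline.translate_batch(["Good morning.", "How are you?"]):
        print(line)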
# ── 3. Causal LM generation ───────────────────────────────────────────────────
class CausalGenerator:
"""
Text generation with CTranslate2 Generator.
Works with LLaMA, GPT-2, OPT, and other decoder-only models.
"""
def __init__(
self,
model_path: str,
tokenizer_id: str, # HF tokenizer name or path
device: str = "cuda",
        compute_type: str = "int8_float16",  # int8 weights + float16 compute (GPU)
inter_threads: int = 1,
):
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
if self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.generator = ctranslate2.Generator(
model_path,
device=device,
device_index=0,
compute_type=compute_type,
inter_threads=inter_threads,
)
def generate(
self,
prompt: str,
max_tokens: int = 256,
temperature: float = 0.7,
top_p: float = 0.9,
        top_k: int = 0,  # 0 = no top-k cutoff (sample from the full distribution)
beam_size: int = 1, # 1 = sampling, >1 = beam search
repetition_penalty: float = 1.05,
) -> str:
tokens = self.tokenizer.convert_ids_to_tokens(
self.tokenizer.encode(prompt)
)
        results = self.generator.generate_batch(
            [tokens],
            max_length=max_tokens,  # CTranslate2 names this max_length, not max_new_tokens
            sampling_temperature=temperature,
            sampling_topp=top_p,
            sampling_topk=top_k,
            beam_size=beam_size,
            repetition_penalty=repetition_penalty,
            include_prompt_in_result=False,  # return only the completion
        )
output_tokens = results[0].sequences[0]
output_ids = self.tokenizer.convert_tokens_to_ids(output_tokens)
return self.tokenizer.decode(output_ids, skip_special_tokens=True)
def generate_batch(
self,
prompts: list[str],
max_tokens: int = 256,
temperature: float = 0.7,
) -> list[str]:
"""Generate completions for multiple prompts in parallel."""
all_tokens = [
self.tokenizer.convert_ids_to_tokens(self.tokenizer.encode(p))
for p in prompts
]
        results = self.generator.generate_batch(
            all_tokens,
            max_length=max_tokens,
            sampling_temperature=temperature,
            include_prompt_in_result=False,
            max_batch_size=len(prompts),
        )
outputs = []
for result in results:
ids = self.tokenizer.convert_tokens_to_ids(result.sequences[0])
text = self.tokenizer.decode(ids, skip_special_tokens=True)
outputs.append(text)
return outputs
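# Streaming sketch (hypothetical helper): recent CTranslate2 releases expose
# Generator.generate_tokens(), which yields one GenerationStepResult per step.
def demo_streaming(gen: CausalGenerator, prompt: str, max_tokens: int = 128) -> None:
    tokens = gen.tokenizer.convert_ids_to_tokens(gen.tokenizer.encode(prompt))
    for step in gen.generator.generate_tokens(
        tokens,
        max_length=max_tokens,
        sampling_temperature=0.7,
    ):
        # step.token is a raw subword piece; detokenize properly for clean text.
        print(step.token, end="", flush=True)
    print()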
# ── 4. Feature extraction (BERT encoder) ─────────────────────────────────────
class EncoderPipeline:
"""Extract sentence embeddings using a BERT-style CTranslate2 Encoder."""
def __init__(
self,
model_path: str,
tokenizer_id: str,
device: str = "cpu",
compute_type: str = "int8",
):
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
self.encoder = ctranslate2.Encoder(
model_path,
device=device,
compute_type=compute_type,
inter_threads=4,
)
def encode(self, text: str, pooling: str = "mean") -> np.ndarray:
"""Encode a single text to a dense vector."""
return self.encode_batch([text], pooling)[0]
def encode_batch(
self,
texts: list[str],
pooling: str = "mean", # "mean" | "cls"
) -> np.ndarray:
"""
Encode multiple texts to dense vectors.
Returns array of shape (N, hidden_dim).
"""
all_tokens = []
all_lengths = []
for text in texts:
encoding = self.tokenizer(
text,
max_length=512,
truncation=True,
add_special_tokens=True,
)
tokens = self.tokenizer.convert_ids_to_tokens(encoding["input_ids"])
all_tokens.append(tokens)
all_lengths.append(len(tokens))
output = self.encoder.forward_batch(all_tokens)
hidden = np.array(output.last_hidden_state) # (N, seq_len, hidden)
if pooling == "cls":
embeddings = hidden[:, 0, :] # [CLS] token
else: # mean pooling
embeddings = np.array([
hidden[i, :all_lengths[i], :].mean(axis=0)
for i in range(len(texts))
])
# L2 normalize
norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
return embeddings / np.maximum(norms, 1e-9)
def similarity(self, text_a: str, text_b: str) -> float:
"""Cosine similarity between two texts."""
embeds = self.encode_batch([text_a, text_b])
return float(np.dot(embeds[0], embeds[1]))
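# Usage sketch (hypothetical helper): rank a corpus against a query. Embeddings
# from encode_batch() are L2-normalized, so a dot product is cosine similarity.
def demo_search(
    enc: EncoderPipeline, query: str, corpus: list[str]
) -> list[tuple[str, float]]:
    corpus_vecs = enc.encode_batch(corpus)  # (N, hidden_dim)
    query_vec = enc.encode(query)           # (hidden_dim,)
    scores = corpus_vecs @ query_vec
    ranked = sorted(zip(corpus, scores), key=lambda pair: pair[1], reverse=True)
    return [(text, float(score)) for text, score in ranked]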
# ── 5. Faster-Whisper transcription ──────────────────────────────────────────
def transcribe_audio(
audio_path: str,
model_size: str = "base", # tiny, base, small, medium, large-v3
device: str = "cpu",
compute_type: str = "int8",
    language: Optional[str] = None,  # None = auto-detect
beam_size: int = 5,
) -> dict:
"""
Transcribe audio with faster-whisper (CTranslate2-backed Whisper).
pip install faster-whisper
"""
from faster_whisper import WhisperModel
model = WhisperModel(
model_size,
device=device,
compute_type=compute_type,
cpu_threads=os.cpu_count() or 4,
)
segments, info = model.transcribe(
audio_path,
beam_size=beam_size,
language=language,
vad_filter=True, # Skip silent sections
vad_parameters={"min_silence_duration_ms": 500},
)
    segments = list(segments)  # transcribe() returns a lazy generator; materialize so we can iterate twice
    full_text = " ".join(s.text.strip() for s in segments)
return {
"text": full_text,
"language": info.language,
"duration": info.duration,
"segments": [
{"start": s.start, "end": s.end, "text": s.text}
for s in segments
],
}
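# Word-level timestamps sketch (hypothetical helper): faster-whisper accepts
# word_timestamps=True and attaches a .words list (start/end/word) to each segment.
def transcribe_with_words(audio_path: str, model_size: str = "base") -> list[dict]:
    from faster_whisper import WhisperModel
    model = WhisperModel(model_size, device="cpu", compute_type="int8")
    segments, _info = model.transcribe(audio_path, word_timestamps=True)
    return [
        {"word": w.word, "start": w.start, "end": w.end}
        for segment in segments
        for w in (segment.words or [])
    ]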
# ── 6. Benchmark ──────────────────────────────────────────────────────────────
def benchmark_translation(model_path: str, tokenizer_id: str, n_sentences: int = 100):
"""Benchmark translation throughput (sentences/sec)."""
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
translator = ctranslate2.Translator(
model_path,
device="cpu",
inter_threads=4,
compute_type="int8",
)
# Generate synthetic sentences
sentences = [f"This is test sentence number {i} for benchmarking." for i in range(n_sentences)]
tokens = [
tokenizer.convert_ids_to_tokens(tokenizer.encode(s))
for s in sentences
]
t0 = time.perf_counter()
results = translator.translate_batch(tokens, beam_size=2, max_batch_size=32)
elapsed = time.perf_counter() - t0
print(f"Translated {n_sentences} sentences in {elapsed:.2f}s")
print(f"Throughput: {n_sentences / elapsed:.0f} sentences/sec")
print(f"Memory: {ctranslate2.get_cuda_device_count()} CUDA devices")
return n_sentences / elapsed
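# Sketch (hypothetical helper): rerun the benchmark under different compute types
# to quantify the int8 speedup on your own hardware.
def compare_compute_types(
    model_path: str, tokenizer_id: str, n_sentences: int = 100
) -> None:
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
    sentences = [f"This is test sentence number {i}." for i in range(n_sentences)]
    tokens = [tokenizer.convert_ids_to_tokens(tokenizer.encode(s)) for s in sentences]
    for compute_type in ("float32", "int8"):
        translator = ctranslate2.Translator(
            model_path, device="cpu", inter_threads=4, compute_type=compute_type
        )
        t0 = time.perf_counter()
        translator.translate_batch(tokens, beam_size=2, max_batch_size=32)
        elapsed = time.perf_counter() - t0
        print(f"{compute_type}: {n_sentences / elapsed:.0f} sentences/sec")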
if __name__ == "__main__":
# Whisper transcription demo (fastest to run, no conversion needed)
print("faster-whisper demo:")
result = transcribe_audio(
"audio.wav", # Replace with a real audio file
model_size="base",
device="cpu",
compute_type="int8",
)
print(f"Transcription: {result['text'][:100]}...")
print(f"Language: {result['language']}, Duration: {result['duration']:.1f}s")
Choose the ONNX Runtime alternative when you need cross-platform inference across Windows, Linux, and mobile with the broadest hardware-accelerator support (CoreML, DirectML, TensorRT, OpenVINO): ONNX Runtime covers the widest deployment targets, while CTranslate2 provides the simplest conversion path from Hugging Face seq2seq and causal LMs, built-in int8 quantization, and the fastest CPU inference for encoder-decoder translation models. Choose direct Hugging Face Transformers inference when you need full model flexibility, custom attention patterns, and easy access to any model on the Hub without format conversion: HF Transformers runs any architecture, while CTranslate2's converted models run roughly 2-4x faster on CPU with int8 quantization and consume about 75% less memory, making CTranslate2 the right choice for latency-sensitive production translation, summarization, and Whisper transcription deployments. The Claude Skills 360 bundle includes CTranslate2 skill sets covering model conversion, translation pipelines, causal generation, BERT feature extraction, faster-whisper transcription, batched inference, and throughput benchmarking. Start with the free tier to try fast transformer inference code generation.