Terraform manages infrastructure declaratively — declare what you want, and Terraform figures out what to create, change, or destroy. The patterns that matter for teams: reusable modules with well-defined interfaces, remote state with locking, separate workspaces or directories per environment, and automated testing. Claude Code writes Terraform modules, sets up state backends, and implements drift detection workflows.
CLAUDE.md for Terraform Projects
## Terraform Stack
- Version: ~> 1.8 (pinned in required_version)
- State backend: S3 + DynamoDB for locking (AWS) or GCS (GCP)
- Module structure: modules/ for reusable, envs/ for environment composition
- Provider versions pinned in required_providers (no floating ~> for major versions)
- Naming: {environment}-{region}-{resource} e.g., prod-us-east-1-orders-db
- Tags: all resources tagged with environment, team, cost-center, managed-by=terraform
- No hardcoded values — everything in variables with descriptions and validation
- Sensitive outputs marked as sensitive = true
- terraform fmt and terraform validate run in CI before plan
Module Structure
infrastructure/
modules/
rds-postgres/
main.tf
variables.tf
outputs.tf
README.md
eks-cluster/
redis-cluster/
vpc/
envs/
staging/
main.tf
terraform.tfvars
backend.tf
production/
main.tf
terraform.tfvars
backend.tf
Reusable Module: RDS PostgreSQL
# modules/rds-postgres/variables.tf
# Unique name for this RDS instance; combined with the environment to form
# the resource name prefix in main.tf.
variable "identifier" {
  description = "Unique identifier for this RDS instance"
  type        = string

  validation {
    # Must be safe for use in AWS resource names: lowercase letters, digits,
    # and hyphens only.
    condition     = can(regex("^[a-z0-9-]+$", var.identifier))
    error_message = "Identifier must be lowercase alphanumeric with hyphens."
  }
}
# Deployment environment; main.tf keys production-only hardening (Multi-AZ,
# deletion protection, longer backup retention) off this value.
variable "environment" {
  description = "Deployment environment (development, staging, production)"
  type        = string

  validation {
    condition     = contains(["development", "staging", "production"], var.environment)
    error_message = "Environment must be development, staging, or production."
  }
}
variable "instance_class" {
  description = "RDS instance class"
  type        = string
  # Modest burstable default; production callers are expected to override
  # (envs/production uses db.r6g.large).
  default = "db.t3.medium"
}

variable "multi_az" {
  description = "Enable Multi-AZ for high availability (required for production)"
  type        = bool
  # main.tf forces this to true when environment == "production", regardless
  # of the value supplied here.
  default = false
}

variable "db_name" {
  description = "Initial database name"
  type        = string
}
# VPC and network wiring for the instance.
variable "vpc_id" {
  # Was missing a description, violating the project rule that every variable
  # carries one (see CLAUDE.md: "everything in variables with descriptions").
  description = "VPC in which the RDS instance and its security group are created"
  type        = string
}

variable "subnet_ids" {
  description = "List of subnet IDs for the DB subnet group (private subnets)"
  type        = list(string)

  validation {
    # An RDS DB subnet group requires subnets in at least two Availability
    # Zones; fail fast at plan time instead of at apply time.
    condition     = length(var.subnet_ids) >= 2
    error_message = "subnet_ids must contain at least two subnets (distinct AZs)."
  }
}

variable "allowed_security_group_ids" {
  description = "Security groups allowed to connect to RDS"
  type        = list(string)
  default     = []
}
# modules/rds-postgres/main.tf
locals {
  # All resources in this module are named "<environment>-<identifier>".
  name_prefix = "${var.environment}-${var.identifier}"

  # Multi-AZ is mandatory in production; elsewhere the caller decides.
  multi_az = var.multi_az || var.environment == "production"
}
# Master password, generated at apply time and delivered to consumers only
# via Secrets Manager (it also lands in Terraform state — standard caveat
# for random_password).
resource "random_password" "db_password" {
  length  = 32
  special = true
  # Restricted special-character set. NOTE(review): presumably chosen to
  # avoid the characters RDS rejects in master passwords ('/', '@', '"',
  # space) — confirm against current RDS constraints before changing.
  override_special = "!#$%^&*()-_=+[]{}<>:?"
}
# Secrets Manager secret holding the DB credentials. Production keeps a
# 30-day recovery window so accidental deletion can be undone; other
# environments delete immediately (recovery_window_in_days = 0).
resource "aws_secretsmanager_secret" "db_credentials" {
  name                    = "${local.name_prefix}-db-credentials"
  recovery_window_in_days = var.environment == "production" ? 30 : 0
  tags                    = local.common_tags
}
# Secret payload: full connection details as one JSON document so that
# applications fetch a single secret to connect. Referencing
# aws_db_instance.this.address makes this version depend on the instance.
resource "aws_secretsmanager_secret_version" "db_credentials" {
  secret_id = aws_secretsmanager_secret.db_credentials.id
  secret_string = jsonencode({
    username = "dbadmin"
    password = random_password.db_password.result
    host     = aws_db_instance.this.address
    port     = aws_db_instance.this.port
    dbname   = var.db_name
  })
}
# Subnet group pinning the instance to the caller-supplied (private) subnets.
resource "aws_db_subnet_group" "this" {
  name       = local.name_prefix
  subnet_ids = var.subnet_ids
  tags       = local.common_tags
}
# Security group guarding the RDS instance. Ingress on the Postgres port is
# granted only to the security groups listed in allowed_security_group_ids.
resource "aws_security_group" "rds" {
  name        = "${local.name_prefix}-rds"
  vpc_id      = var.vpc_id
  description = "RDS security group for ${local.name_prefix}"

  # One ingress rule per allowed source security group.
  dynamic "ingress" {
    for_each = var.allowed_security_group_ids
    content {
      from_port = 5432
      to_port   = 5432
      protocol  = "tcp"
      # FIX: inside an inline ingress block the argument is `security_groups`
      # (a set of source SG IDs). `source_security_group_id` is only valid on
      # the standalone aws_security_group_rule resource; as originally
      # written, `terraform plan` fails with "Unsupported argument".
      security_groups = [ingress.value]
    }
  }

  tags = local.common_tags
}
# The PostgreSQL instance. Production gets the full hardening set (Multi-AZ,
# deletion protection, final snapshot, 35-day backups, Performance Insights);
# non-production trades those for cost.
resource "aws_db_instance" "this" {
  identifier     = local.name_prefix
  instance_class = var.instance_class
  engine         = "postgres"
  # NOTE(review): an exact minor version is pinned while
  # auto_minor_version_upgrade is left at its default — if AWS upgrades the
  # minor version this can produce perpetual plan diffs; confirm the
  # intended upgrade policy.
  engine_version = "16.1"
  db_name        = var.db_name
  username       = "dbadmin"
  password       = random_password.db_password.result

  db_subnet_group_name   = aws_db_subnet_group.this.name
  vpc_security_group_ids = [aws_security_group.rds.id]

  # Production hardening
  multi_az            = local.multi_az
  deletion_protection = var.environment == "production"
  # Non-production instances are disposable; production takes a final
  # snapshot on destroy. NOTE(review): the fixed "-final" snapshot name will
  # collide if the instance is ever destroyed and recreated — verify.
  skip_final_snapshot       = var.environment != "production"
  final_snapshot_identifier = var.environment == "production" ? "${local.name_prefix}-final" : null
  backup_retention_period   = var.environment == "production" ? 35 : 7
  backup_window             = "03:00-04:00"
  maintenance_window        = "sun:04:00-sun:05:00"

  enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"]
  performance_insights_enabled    = var.environment == "production"
  storage_encrypted               = true

  tags = local.common_tags
}
# Tags applied to every resource in this module.
# NOTE(review): the project convention requires environment, team,
# cost-center, and managed-by tags — `team` and `cost-center` are not set
# here. Confirm whether they come from a provider default_tags block or
# need to be added as module variables.
locals {
  common_tags = {
    Environment = var.environment
    ManagedBy   = "terraform"
    Module      = "rds-postgres"
    Identifier  = var.identifier
  }
}
# modules/rds-postgres/outputs.tf
output "endpoint" {
  description = "RDS instance endpoint"
  value       = aws_db_instance.this.endpoint
}

output "secret_arn" {
  description = "Secrets Manager ARN containing DB credentials"
  value       = aws_secretsmanager_secret.db_credentials.arn
}

output "security_group_id" {
  description = "RDS security group ID — allow this in app security groups"
  value       = aws_security_group.rds.id
}

output "connection_string" {
  description = "PostgreSQL connection string (password from Secrets Manager)"
  # Deliberately omits the password, so leaving it non-sensitive is safe;
  # clients must fetch the password from the secret referenced above.
  value     = "postgresql://dbadmin@${aws_db_instance.this.endpoint}/${var.db_name}"
  sensitive = false
}
Environment Composition
# envs/production/main.tf
# Root module for the production environment. Terraform and AWS provider
# versions are pinned per the project convention in CLAUDE.md.
terraform {
  required_version = "~> 1.8"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.40"
    }
  }
}
# Remote state backend
# backend.tf
# S3 remote state with DynamoDB-based locking. Backend configuration cannot
# reference variables, so the values are hardcoded per environment directory.
terraform {
  backend "s3" {
    bucket         = "myorg-terraform-state"
    key            = "production/terraform.tfstate"
    region         = "us-east-1"
    encrypt        = true
    dynamodb_table = "terraform-state-lock"
  }
}
# Data source: read shared network stack outputs (VPC created there)
# Reads outputs (vpc_id, private_subnet_ids) from the shared network
# stack's state file so this environment composes on top of it.
data "terraform_remote_state" "network" {
  backend = "s3"
  config = {
    bucket = "myorg-terraform-state"
    key    = "network/terraform.tfstate"
    region = "us-east-1"
  }
}
# Orders database, composed from the reusable rds-postgres module. Network
# wiring comes from the shared network stack's remote state; ingress is
# limited to the order-api service's security group.
module "orders_db" {
  source = "../../modules/rds-postgres"

  identifier     = "orders"
  environment    = "production"
  db_name        = "orders"
  instance_class = "db.r6g.large"
  multi_az       = true # Module enforces this, but explicit is better

  vpc_id     = data.terraform_remote_state.network.outputs.vpc_id
  subnet_ids = data.terraform_remote_state.network.outputs.private_subnet_ids

  allowed_security_group_ids = [module.order_api.security_group_id]
}
# Order API service. It receives only the secret ARN — the application
# fetches credentials from Secrets Manager at runtime, keeping them out of
# task definitions. NOTE(review): var.image_tag is not declared in the files
# shown here — confirm it exists in this environment's variables.
module "order_api" {
  source = "../../modules/ecs-service"

  name        = "order-api"
  environment = "production"
  image       = "123456789.dkr.ecr.us-east-1.amazonaws.com/order-api:${var.image_tag}"

  environment_variables = {
    DATABASE_SECRET_ARN = module.orders_db.secret_arn
    PORT                = "8080"
  }
}
Drift Detection + CI Workflow
# .github/workflows/terraform.yml
name: Terraform

on:
  push:
    branches: [main]
    paths: ['infrastructure/**']
  pull_request:
    paths: ['infrastructure/**']
  schedule:
    - cron: '0 8 * * *' # Daily drift detection

# Least-privilege token permissions. FIX: `role-to-assume` performs an OIDC
# exchange, which requires `id-token: write`, and the plan-comment step
# needs `pull-requests: write` — with GitHub's restrictive default
# permissions both steps fail without this block.
permissions:
  id-token: write
  contents: read
  pull-requests: write

jobs:
  plan:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: hashicorp/setup-terraform@v3
        with:
          terraform_version: "~1.8"
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::123456789:role/TerraformCI
          aws-region: us-east-1
      - name: Terraform Format Check
        run: terraform fmt -check -recursive
        working-directory: infrastructure
      - name: Terraform Init
        run: terraform init
        working-directory: infrastructure/envs/production
      - name: Terraform Validate
        run: terraform validate
        working-directory: infrastructure/envs/production
      - name: Terraform Plan
        id: plan
        run: terraform plan -detailed-exitcode -out=tfplan
        working-directory: infrastructure/envs/production
        continue-on-error: true
        # Exit code 2 = changes detected (not an error)
        # Exit code 0 = no changes, Exit code 1 = error
      - name: Fail on plan error
        # FIX: continue-on-error swallows genuine plan failures; re-raise
        # exit code 1 so a broken plan cannot pass CI (exit code 2, i.e.
        # "changes detected", is still allowed through).
        if: steps.plan.outputs.exitcode == '1'
        run: exit 1
      - name: Comment PR with plan
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const output = `#### Terraform Plan 📋
            Exit Code: ${{ steps.plan.outputs.exitcode }}
            <details><summary>Show Plan</summary>
            \`\`\`${{ steps.plan.outputs.stdout }}\`\`\`
            </details>`;
            github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, body: output });
      - name: Terraform Apply
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: terraform apply -auto-approve tfplan
        working-directory: infrastructure/envs/production
For the Kubernetes deployments that run on infrastructure created by Terraform, see the zero-downtime deployments guide. For securing the secrets that Terraform creates in Secrets Manager, the cryptography guide covers envelope encryption patterns. The Claude Skills 360 bundle includes infrastructure skill sets covering Terraform modules, remote state, and multi-environment patterns. Start with the free tier to try infrastructure module generation.