Use Terraform To Deploy AWS Resources

Terraform is a product by Hashicorp that uses Infrastructure as Code (IaC) to provision cloud infrastructure.
Goal: an AWS EC2 instance in the public subnet of a VPC, routed through an Internet Gateway, can successfully curl www.google.com.

Prerequisites

  1. AWS credentials: AWS_ACCOUNT_ID, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
  2. Terraform installed in your system
  3. Install AWS CLI

Prepare terraform script

In one directory: create provider.tf, variables.tf, main.tf, and output.tf

Configure terraform providers

In provider.tf, declare the providers and versions you need — AWS, Azure, GCP, OCI, or any other cloud vendor.

1
2
3
4
5
6
7
8
# Pin the AWS provider version so `terraform init` resolves a known-good release.
terraform {
required_providers {
# Official AWS provider from the HashiCorp registry.
aws = {
source = "hashicorp/aws"
version = "~> 4.19.0"
}
}
}

Deploy resources

In main.tf:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
# Configure the AWS provider; every resource below is created in this region.
provider "aws" {
region = "us-east-1" # Change to your preferred region
}

# Generate a new RSA private key (created by Terraform, never leaves your machine)
resource "tls_private_key" "ssh_key" {
algorithm = "RSA"
rsa_bits = 4096 # stronger than the 2048-bit default
}

# Save the private key locally (optional but useful for SSH access)
resource "local_file" "private_key" {
content = tls_private_key.ssh_key.private_key_pem
filename = "private.pem"
file_permission = "0600" # owner read/write only; ssh refuses keys readable by others
}

# Upload the public key to AWS so EC2 can install it on the instance
resource "aws_key_pair" "generated_key" {
key_name = "ubuntu-ssh-key" # Name for the AWS key pair
public_key = tls_private_key.ssh_key.public_key_openssh
}

# Create a VPC with public subnet.
# enable_dns_hostnames is added alongside enable_dns_support: without it,
# instances in the VPC are not assigned a public DNS name even when they
# receive a public IP.
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_support   = true
  enable_dns_hostnames = true
  tags = {
    Name = "Ubuntu-VPC"
  }
}

# Create an Internet Gateway — the VPC's door to the public internet
resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.main.id
  tags = {
    Name = "IGW"
  }
}

# Public subnet (for the EC2 instance)
resource "aws_subnet" "public" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.1.0/24"
  availability_zone       = "us-east-1a" # must match the provider region
  map_public_ip_on_launch = true         # assign a public IP automatically
  tags = {
    Name = "Public-Subnet"
  }
}

# Route table for public traffic: default-route all outbound traffic to the IGW
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
# 0.0.0.0/0 matches everything not covered by the VPC's implicit local route
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "Public-Route-Table"
}
}

# Associate the public subnet with the route table so it actually uses the IGW
resource "aws_route_table_association" "public" {
subnet_id = aws_subnet.public.id
route_table_id = aws_route_table.public.id
}

# Security group allowing SSH (22) and HTTPS (443) inbound, all traffic outbound
resource "aws_security_group" "allow_web" {
name = "allow_web_traffic"
description = "Allow inbound web traffic"
vpc_id = aws_vpc.main.id

ingress {
description = "SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] # Warning: Restrict this in production!
}

ingress {
description = "HTTPS"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

# Allow all outbound traffic (required for apt-get and the curl check)
egress {
from_port = 0
to_port = 0
protocol = "-1" # -1 = every protocol
cidr_blocks = ["0.0.0.0/0"]
}
}

# Define OS image: latest Canonical Ubuntu 22.04 (Jammy) AMI in this region
data "aws_ami" "ubuntu" {
  most_recent = true
  owners      = ["099720109477"] # Canonical's AWS account ID

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
  }

  # Restrict to HVM virtualization, as in the official aws_ami examples;
  # guards against the name pattern ever matching a non-HVM image.
  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

# Deploy EC2 instance
resource "aws_instance" "web_server" {
  ami           = data.aws_ami.ubuntu.id # latest Ubuntu AMI looked up above
  instance_type = "t3.micro"
  subnet_id     = aws_subnet.public.id
  key_name      = aws_key_pair.generated_key.key_name

  # For an instance launched into a VPC subnet, use vpc_security_group_ids
  # (which takes group IDs). The `security_groups` argument expects group
  # *names* and causes Terraform to force-replace the instance on changes.
  vpc_security_group_ids = [aws_security_group.allow_web.id]

  # Connection settings used by the remote-exec provisioner below
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = tls_private_key.ssh_key.private_key_pem
    host        = self.public_ip
  }

  # Install packages using remote-exec (better than local-exec)
  provisioner "remote-exec" {
    inline = [
      "sudo apt-get update -y",
      "sudo apt-get install -y curl iperf3"
    ]
  }

  tags = {
    Name = "Ubuntu-Server"
  }
}

# Output the public IP so it can be read with `terraform output -raw public_ip`
output "public_ip" {
value = aws_instance.web_server.public_ip
}

Run/destroy terraform

1
2
3
4
$ terraform init      # download the providers declared in provider.tf
$ terraform plan      # preview the changes
$ terraform apply     # create the resources
$ terraform destroy   # tear everything down when finished

Troubleshoot

After terraform apply successfully, try to ssh into public VM:

1
$ ssh -i private.pem ubuntu@PUBLIC_IP

Inside of public VM:

1
curl -s -I https://www.google.com | grep HTTP

Automation

Here’s a pytest implementation that automates the entire workflow, including Terraform deployment, SSH verification, and internet connectivity checks:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import subprocess
import paramiko
import pytest
import time

# Fixture for Terraform deployment (module-scoped)
# Fixture for Terraform deployment (module-scoped: deploy once, share across tests)
@pytest.fixture(scope="module")
def vm_ip():
    """Deploy the Terraform stack and yield the VM's public IP.

    The original version only destroyed the stack after a successful yield;
    if `terraform apply` (or the output read) failed, partially-created AWS
    resources were leaked. The try/finally guarantees `destroy` runs in
    every case — apply failure, test failure, or normal completion.
    """
    subprocess.run(["terraform", "init"], check=True)
    try:
        subprocess.run(["terraform", "apply", "-auto-approve"], check=True)

        # Read the `public_ip` output declared in the Terraform config
        ip = subprocess.run(
            ["terraform", "output", "-raw", "public_ip"],
            capture_output=True,
            text=True,
            check=True,
        ).stdout.strip()

        yield ip  # Tests run here
    finally:
        # Teardown: always destroy, so no billable resources are orphaned
        subprocess.run(["terraform", "destroy", "-auto-approve"], check=True)

# Test SSH and internet connectivity
# Test SSH and internet connectivity
def test_vm_internet_access(vm_ip):
    """Verify SSH access and the VM's ability to reach https://www.google.com.

    Fixes over the original: the unused `stdin` handle is discarded,
    curl's stderr is captured so a failure explains itself, and the retry
    count is a named constant instead of magic numbers.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_path = "private.pem"  # written by the local_file Terraform resource
    max_attempts = 3

    try:
        # The instance may still be booting right after `terraform apply`,
        # so retry the SSH connection a few times before giving up.
        for attempt in range(1, max_attempts + 1):
            try:
                ssh.connect(
                    vm_ip,
                    username="ubuntu",
                    key_filename=key_path,
                    timeout=10,
                )
                break
            except Exception as e:
                if attempt == max_attempts:
                    pytest.fail(f"SSH connection failed after {max_attempts} attempts: {e}")
                time.sleep(5)

        # curl prints only the HTTP status code; 200 proves outbound access works
        _, stdout, stderr = ssh.exec_command(
            "curl -s -o /dev/null -w '%{http_code}' https://www.google.com"
        )
        status = stdout.read().decode().strip()
        err = stderr.read().decode().strip()
        assert status == "200", (
            f"Failed to reach Google.com (status={status!r}, stderr={err!r})"
        )

    finally:
        ssh.close()  # always release the connection, even on assertion failure
Author: Yu
Link: https://yurihe.github.io/2025/03/31/3.tf_case/
Copyright Notice: All articles in this blog are licensed under CC BY-NC-SA 4.0 unless stated otherwise.