Skip to content

Example 2 : Loadbalancers

Terraform templates to deploy 2 clusters, each composed of a load balancer that distributes traffic in a round-robin manner to 3 VMs running a simple nginx server listening on port 80.

cluster_with_loadbalancer.png

Requirements

  1. If you didn't follow the "Getting started", download your clouds.yaml file from the infomaniak manager or Horizon dashboard and copy it to ~/.config/openstack
  2. Copy/Paste the content of 00-providers.tf 01-variables.tf 02-networks.tf 03-sec_groups.tf 04-instances.tf 05-loadbalancer.tf below
  3. Replace "PCP-XXXXXXX" with your project name in 00-providers.tf
  4. Change the SSH key in 01-variables.tf

terraform validate
You should obtain a result like this:
Success! The configuration is valid.

And to finish deploy the resources on the cloud:

terraform apply

You should obtain a result like this:

Apply complete! Resources: 39 added, 0 changed, 0 destroyed.

Outputs:

loadbbalancers-ip = {
  "Public IP cluster1" = [
    "195.15.x.y",
  ]
  "Public IP cluster2" = [
    "195.15.x.y",
  ]
}

Success

Your resources are now successfully deployed with Terraform on Infomaniak Public Cloud. You can use your web browser to check that the public IPs serve the nginx welcome page.

00-providers.tf
# Define required providers
# Pins the OpenStack provider to an exact version for reproducible runs.
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "1.44.0"
    }
  }
}

# Configure the OpenStack Provider
# "cloud" must match the entry name in ~/.config/openstack/clouds.yaml
# (replace PCP-XXXXXXX with your Infomaniak project name).
# use_octavia makes the provider target the Octavia API for the
# openstack_lb_* resources defined in 05-loadbalancer.tf.
provider "openstack" {
   cloud = "PCP-XXXXXXX"
   use_octavia = true
}
01-variables.tf
# Name of the public (external) network used for routers and floating IPs
variable "public-net" {
  default = "ext-floating1"
}

# Flavor for cluster1 instances
variable "cluster1_flavor_name" {
  default = "a2-ram4-disk20-perf1"
}

# Flavor for cluster2 instances
variable "cluster2_flavor_name" {
  default = "a2-ram4-disk20-perf1"
}

# Image used for all instances
variable "image_name" {
  default = "Debian 12 bookworm"
}

# Default login user of the chosen image (used for SSH access)
variable "ssh_user_name" {
  default = "debian"
}

# Number of backend VMs created in each cluster
variable "number_of_instances" {
  type    = number
  default = 3
}

# Public SSH key injected into every instance — replace with your own
variable "ssh-key" {
  default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5gEibfuRHJ9/9v5xUtiWH773dRaS9XQVdCIiGcxqGSmAsWbBzJBeMpAgRFdTnbf7Ku4AzahAgvYjo67l3Pg+HzGe8kWFHDtSyp8mtN1Uq0UGvVtEwau5WnnGtAB6yyCTld8N7v9dNevQLB1kUTriv3Yc596Ez1ffbt6IEJuGCK5nq2vijER7r85eRwQ9FgDZPPBqda9ppta+v7yhkGUPvJN0M4qnWTNhearv209dYOI5U1BxvrnG6wMeP328THO4Mfl6wabwcrdvvzLlYtrkRkT0Pb2ilLoGYZAtBredc4S0PFP+TUMFKomOLwujyIJ+JKKAgXzXjlyD8CTPBV3Sl"
}

# Register the SSH public key as an OpenStack keypair
resource "openstack_compute_keypair_v2" "ssh-key" {
  name       = "ssh-key"
  public_key = var.ssh-key # HCL2: interpolation-only "${...}" wrapping is deprecated
}
02-networks.tf
#### CREATE NETWORKS ###

########################
# cluster1 network
########################
# Look up the public network that will serve as the router's external gateway
data "openstack_networking_network_v2" "cluster1" {
  name = var.public-net
}

# Private network hosting the cluster1 instances
resource "openstack_networking_network_v2" "cluster1" {
  name           = "cluster1"
  admin_state_up = true
}

# Create Subnet (RFC 1918 private range)
resource "openstack_networking_subnet_v2" "cluster1" {
  name            = "cluster1"
  network_id      = openstack_networking_network_v2.cluster1.id
  cidr            = "10.0.0.0/24"
  ip_version      = 4
  dns_nameservers = ["83.166.143.51", "83.166.143.52"]
}

# Create Router with its gateway on the public network
resource "openstack_networking_router_v2" "cluster1" {
  name                = "cluster1"
  admin_state_up      = true
  external_network_id = data.openstack_networking_network_v2.cluster1.id
}

# Plug the subnet into the router so instances get outbound connectivity
resource "openstack_networking_router_interface_v2" "cluster1" {
  router_id = openstack_networking_router_v2.cluster1.id
  subnet_id = openstack_networking_subnet_v2.cluster1.id
}

#########################
# Cluster2 network
#########################
# Look up the public network that will serve as the router's external gateway
data "openstack_networking_network_v2" "cluster2" {
  name = var.public-net
}

# Private network hosting the cluster2 instances
resource "openstack_networking_network_v2" "cluster2" {
  name           = "cluster2"
  admin_state_up = true
}

# Create Subnet.
# NOTE: the original used 11.0.0.0/24, which is NOT private address space
# (11.0.0.0/8 is publicly routable); use a distinct RFC 1918 range instead.
resource "openstack_networking_subnet_v2" "cluster2" {
  name            = "cluster2"
  network_id      = openstack_networking_network_v2.cluster2.id
  cidr            = "10.0.1.0/24"
  ip_version      = 4
  dns_nameservers = ["83.166.143.51", "83.166.143.52"]
}

# Create Router with its gateway on the public network
resource "openstack_networking_router_v2" "cluster2" {
  name                = "cluster2"
  admin_state_up      = true
  external_network_id = data.openstack_networking_network_v2.cluster2.id
}

# Plug the subnet into the router so instances get outbound connectivity
resource "openstack_networking_router_interface_v2" "cluster2" {
  router_id = openstack_networking_router_v2.cluster2.id
  subnet_id = openstack_networking_subnet_v2.cluster2.id
}
03-sec_groups.tf
# ############################################
# Create Security Group For cluster1 Instances
# ############################################
resource "openstack_networking_secgroup_v2" "cluster1_sec_group" {
  name        = "cluster1"
  description = "Security group for the cluster1 instances"
}

# Allow SSH (port 22) from anywhere
resource "openstack_networking_secgroup_rule_v2" "ssh-cluster1" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster1_sec_group.id
}

# Allow HTTP (port 80) from anywhere
resource "openstack_networking_secgroup_rule_v2" "http-cluster1" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster1_sec_group.id
}

# Allow ICMP (ping) from anywhere
resource "openstack_networking_secgroup_rule_v2" "icmp-cluster1" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "icmp"
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster1_sec_group.id
}

# ############################################
# Create Security Group For cluster2 Instances
# ############################################
resource "openstack_networking_secgroup_v2" "cluster2_sec_group" {
  name        = "cluster2"
  description = "Security group for the cluster2 instances"
}

# Allow SSH (port 22) from anywhere
resource "openstack_networking_secgroup_rule_v2" "ssh-cluster2" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 22
  port_range_max    = 22
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster2_sec_group.id
}

# Allow HTTP (port 80) from anywhere
resource "openstack_networking_secgroup_rule_v2" "http-cluster2" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster2_sec_group.id
}

# Allow ICMP (ping) from anywhere
resource "openstack_networking_secgroup_rule_v2" "icmp-cluster2" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "icmp"
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.cluster2_sec_group.id
}
04-instances.tf
############################
# Create cluster 1 Instances
############################

# Server group with soft anti-affinity: the scheduler spreads the VMs
# across different hypervisors when capacity allows
resource "openstack_compute_servergroup_v2" "sg-anti-affinity-1" {
  name     = "sg-anti-affinity-1"
  policies = ["soft-anti-affinity"]
}

# Create the cluster1 backend instances
resource "openstack_compute_instance_v2" "cluster1" {
  count           = var.number_of_instances
  name            = "cluster1-${count.index}"
  flavor_name     = var.cluster1_flavor_name
  image_name      = var.image_name
  key_pair        = openstack_compute_keypair_v2.ssh-key.name
  security_groups = [openstack_networking_secgroup_v2.cluster1_sec_group.name]

  network {
    uuid = openstack_networking_network_v2.cluster1.id
  }

  # The subnet must exist before the instance port can get an address
  depends_on = [openstack_networking_subnet_v2.cluster1]

  scheduler_hints {
    group = openstack_compute_servergroup_v2.sg-anti-affinity-1.id
  }

  # Install and start nginx at first boot; "enable" added so nginx also
  # starts after a reboot (consistent with the cluster2 instances)
  user_data = <<-EOF
    #!/bin/bash
    sudo apt update -y
    sudo apt install nginx -y
    sudo systemctl enable nginx
    sudo systemctl start nginx
    EOF
}

############################
# Create cluster 2 Instances
############################
# Server group with soft anti-affinity: the scheduler spreads the VMs
# across different hypervisors when capacity allows
resource "openstack_compute_servergroup_v2" "sg-anti-affinity-2" {
  name     = "sg-anti-affinity-2"
  policies = ["soft-anti-affinity"]
}

# Create the cluster2 backend instances
resource "openstack_compute_instance_v2" "cluster2" {
  count           = var.number_of_instances
  name            = "cluster2-${count.index}"
  flavor_name     = var.cluster2_flavor_name
  image_name      = var.image_name
  key_pair        = openstack_compute_keypair_v2.ssh-key.name
  security_groups = [openstack_networking_secgroup_v2.cluster2_sec_group.name]

  network {
    uuid = openstack_networking_network_v2.cluster2.id
  }

  # BUG FIX: originally depended on cluster1's subnet — the instance port
  # needs cluster2's subnet to exist, not cluster1's
  depends_on = [openstack_networking_subnet_v2.cluster2]

  # BUG FIX: originally used sg-anti-affinity-1, which put the cluster2
  # VMs in cluster1's server group and left sg-anti-affinity-2 unused
  scheduler_hints {
    group = openstack_compute_servergroup_v2.sg-anti-affinity-2.id
  }

  # Install, enable and start nginx at first boot
  user_data = <<-EOF
    #!/bin/bash
    sudo apt update -y
    sudo apt install nginx -y
    sudo systemctl enable nginx
    sudo systemctl start nginx
    EOF
}
05-loadbalancer.tf
##############################
# CLUSTER1 HTTP LOAD BALANCER
##############################
# Create the load balancer with its VIP on the cluster1 private subnet
resource "openstack_lb_loadbalancer_v2" "cluster1" {
  name          = "cluster1"
  vip_subnet_id = openstack_networking_subnet_v2.cluster1.id
  depends_on    = [openstack_compute_instance_v2.cluster1]
}

# Public (floating) IP attached to the load balancer VIP port.
# Referencing vip_port_id already creates an implicit dependency on the
# load balancer, so no explicit depends_on is needed.
resource "openstack_networking_floatingip_v2" "float_cluster1" {
  pool    = var.public-net
  port_id = openstack_lb_loadbalancer_v2.cluster1.vip_port_id
}

# HTTP listener on port 80
resource "openstack_lb_listener_v2" "cluster1" {
  name            = "listener_cluster1"
  protocol        = "HTTP"
  protocol_port   = 80
  loadbalancer_id = openstack_lb_loadbalancer_v2.cluster1.id
  depends_on      = [openstack_lb_loadbalancer_v2.cluster1]
}

# Pool distributing requests round-robin between the instances
resource "openstack_lb_pool_v2" "cluster1" {
  name        = "pool_cluster1"
  protocol    = "HTTP"
  lb_method   = "ROUND_ROBIN"
  listener_id = openstack_lb_listener_v2.cluster1.id
  depends_on  = [openstack_lb_listener_v2.cluster1]
}

# Add each cluster1 instance to the pool (modern index syntax instead of
# the legacy element(...) / .*. splat form)
resource "openstack_lb_member_v2" "cluster1" {
  count         = var.number_of_instances
  address       = openstack_compute_instance_v2.cluster1[count.index].access_ip_v4
  name          = openstack_compute_instance_v2.cluster1[count.index].name
  protocol_port = 80
  pool_id       = openstack_lb_pool_v2.cluster1.id
  subnet_id     = openstack_networking_subnet_v2.cluster1.id
  depends_on    = [openstack_lb_pool_v2.cluster1]
}

# Health monitor checking the HTTP service is responding
resource "openstack_lb_monitor_v2" "cluster1" {
  name        = "monitor_cluster1"
  pool_id     = openstack_lb_pool_v2.cluster1.id
  type        = "HTTP"
  delay       = 2
  timeout     = 2
  max_retries = 3
  depends_on  = [openstack_lb_member_v2.cluster1]
}

##############################
# CLUSTER2 HTTP LOAD BALANCER
##############################
# Create the load balancer with its VIP on the cluster2 private subnet
resource "openstack_lb_loadbalancer_v2" "cluster2" {
  name          = "cluster2"
  vip_subnet_id = openstack_networking_subnet_v2.cluster2.id
  depends_on    = [openstack_compute_instance_v2.cluster2]
}

# Public (floating) IP attached to the load balancer VIP port.
# Referencing vip_port_id already creates an implicit dependency on the
# load balancer, so no explicit depends_on is needed.
resource "openstack_networking_floatingip_v2" "float_cluster2" {
  pool    = var.public-net
  port_id = openstack_lb_loadbalancer_v2.cluster2.vip_port_id
}

# HTTP listener on port 80
resource "openstack_lb_listener_v2" "cluster2" {
  name            = "listener_cluster2"
  protocol        = "HTTP"
  protocol_port   = 80
  loadbalancer_id = openstack_lb_loadbalancer_v2.cluster2.id
  depends_on      = [openstack_lb_loadbalancer_v2.cluster2]
}

# Pool distributing requests round-robin between the instances
resource "openstack_lb_pool_v2" "cluster2" {
  name        = "pool_cluster2"
  protocol    = "HTTP"
  lb_method   = "ROUND_ROBIN"
  listener_id = openstack_lb_listener_v2.cluster2.id
  depends_on  = [openstack_lb_listener_v2.cluster2]
}

# Add each cluster2 instance to the pool (modern index syntax instead of
# the legacy element(...) / .*. splat form)
resource "openstack_lb_member_v2" "cluster2" {
  count         = var.number_of_instances
  address       = openstack_compute_instance_v2.cluster2[count.index].access_ip_v4
  name          = openstack_compute_instance_v2.cluster2[count.index].name
  protocol_port = 80
  pool_id       = openstack_lb_pool_v2.cluster2.id
  subnet_id     = openstack_networking_subnet_v2.cluster2.id
  depends_on    = [openstack_lb_pool_v2.cluster2]
}

# Health monitor checking the HTTP service is responding
resource "openstack_lb_monitor_v2" "cluster2" {
  name        = "monitor_cluster2"
  pool_id     = openstack_lb_pool_v2.cluster2.id
  type        = "HTTP"
  delay       = 2
  timeout     = 2
  max_retries = 3
  depends_on  = [openstack_lb_member_v2.cluster2]
}

# NOTE: the output name keeps the original "loadbbalancers" misspelling so
# that existing consumers (scripts, terraform_remote_state references) keep
# working — renaming an output is a breaking interface change.
output "loadbbalancers-ip" {
  description = "Public IP of each Loadbalancer"
  depends_on = [
    openstack_networking_floatingip_v2.float_cluster1,
    openstack_networking_floatingip_v2.float_cluster2,
  ]
  value = {
    "Public IP cluster1" = [openstack_networking_floatingip_v2.float_cluster1.address]
    "Public IP cluster2" = [openstack_networking_floatingip_v2.float_cluster2.address]
  }
}