How To Automate AWS Cloud Using Terraform

Posted By: Ankit Gupta | 27-Aug-2020

In this blog post, you'll learn how to automate AWS cloud using Terraform. 

To build the complete infrastructure for hosting a web page on AWS Cloud using Terraform, follow the steps below:

 

STEP 1: Create a key pair and store it on the local machine. This key will be used to log in to the EC2 instance.

STEP 2: Create a security group that allows inbound traffic on port 80 (HTTP) and port 22 (SSH).

STEP 3: Launch an EC2 instance with the key pair and security group created in the steps above.

STEP 4: Configure the OS so it can host a web page: install the Apache web server and start the required services.

STEP 5: Create an EBS volume and attach it to the instance.

STEP 6: Format the volume and mount it on the web server's default document root. The whole process is handled by Terraform.

STEP 7: Clone the GitHub repo containing the web page into the volume's directory.

STEP 8: Create an S3 bucket to store the images for the web page.

STEP 9: Create a CloudFront distribution for faster delivery of the image, and append the CloudFront URL to the web page.

So now let’s begin!

NOTE: To follow this practical, you should have Terraform installed on your local machine.

 

provider "aws" {
        region = "ap-south-1"
    profile = "user1profile"
}
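The profile argument keeps credentials out of the configuration file: Terraform reads the access keys from the named AWS CLI profile on your machine. This assumes the user1profile credentials were set up beforehand, for example with aws configure --profile user1profile.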

variable "ami_id" {
    default = "ami-0447a12f28fddb066"
}

variable "instance_name" {
    default = "test-server"
}

variable "server-sg_name" {
    default = "server-sgfromtf"
}

variable "bucket_name" {
    default = "test-bucket"
}

variable "key_name" {
    default = "tf1"
}

variable "object_name" {
    default = "test.jpg"
}
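All of these defaults can be overridden without editing the configuration. A minimal sketch, assuming a terraform.tfvars file in the same directory (the values here are hypothetical), looks like this:

    instance_name = "web-server-1"
    bucket_name   = "my-unique-bucket-2020"

This matters most for bucket_name: S3 bucket names are globally unique, so the test-bucket default will almost certainly be taken already.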

resource "tls_private_key" "key-pair" {
    algorithm = "RSA"
    rsa_bits = 4096
}

resource "local_file" "private-key" {    
        content = tls_private_key.key-pair.private_key_pem
        filename =     "${var.key_name}.pem"
        file_permission = "0400"
}

resource "aws_key_pair" "key-pair-tf1" {  
        key_name   = var.key_name
        public_key = tls_private_key.key-pair.public_key_openssh
}

resource "aws_security_group" "server-sg" {
    name = var.server-sg_name
    description = "Allow HTTP and SSH inbound traffic"
    
    ingress    {    
        from_port = 80
              to_port = 80
              protocol = "tcp"
              cidr_blocks = ["0.0.0.0/0"]
              ipv6_cidr_blocks = ["::/0"]
          }
          
          ingress {    
              from_port = 22
              to_port = 22
              protocol = "tcp"
              cidr_blocks = ["0.0.0.0/0"]
              ipv6_cidr_blocks = ["::/0"]
          }
          
          ingress {    
              from_port = -1
              to_port = -1
              protocol = "icmp"
              cidr_blocks = ["0.0.0.0/0"]
              ipv6_cidr_blocks = ["::/0"]
          }
          
          egress {
              from_port = 0
              to_port = 0
              protocol = "-1"
              cidr_blocks = ["0.0.0.0/0"]
          }
}
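Leaving SSH open to 0.0.0.0/0 is fine for a throwaway demo but should be tightened anywhere else. A minimal hardening sketch, assuming 203.0.113.25 stands in for your own public IP (replace it with your real address), narrows the SSH rule to a single host:

    ingress {
        from_port   = 22
        to_port     = 22
        protocol    = "tcp"
        cidr_blocks = ["203.0.113.25/32"]    # hypothetical admin IP
    }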

resource "aws_instance" "server" {    
        ami = var.ami_id
    instance_type = "t2.micro"
    key_name = var.key_name
    security_groups = [ aws_security_group.server-sg.name ]
    
    tags = {
        Name = var.instance_name
    }
    
    connection {
            type     = "ssh"
            user     = "ec2-user"
            private_key = file("${var.key_name}.pem")
            host = aws_instance.server.public_ip
      }
    
    provisioner "local-exec" {
        command = "echo ${aws_instance.server.public_ip} > public-ip.txt"
    }
    
    provisioner "remote-exec" {
        
        inline = [
                       "sudo yum install httpd  git -y",
                           "sudo systemctl start httpd",
                           "sudo systemctl enable httpd",
                           "sudo systemctl restart httpd"
                ]
    }
}
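The remote-exec provisioner needs a working SSH connection before the instance is usable. An alternative sketch, assuming an Amazon Linux 2 AMI with cloud-init, is to pass the same setup commands as user_data inside the aws_instance block instead:

    user_data = <<-EOF
                #!/bin/bash
                yum install -y httpd git
                systemctl enable --now httpd
                EOF

With user_data the install runs at first boot without Terraform holding an SSH session, though you lose the provisioner's step-by-step output.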

resource "aws_ebs_volume" "pendrive" {
        availability_zone = aws_instance.server.availability_zone
        size              = 1

        tags = {
        Name = "p-drive"
  }
}

resource "aws_volume_attachment" "pd_attach" {
        device_name = "/dev/sdh"
        volume_id   = aws_ebs_volume.pendrive.id
        instance_id = aws_instance.server.id
        force_detach = true
}
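Note that although the attachment asks for /dev/sdh, Xen-based instance types such as t2.micro expose the disk inside the guest as /dev/xvdh. That is why the commands in the next resource format and mount /dev/xvdh rather than /dev/sdh.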

resource "null_resource" "attach-pd" {
    depends_on = [
        aws_volume_attachment.pd_attach,
    ]
    
    connection {
            type     = "ssh"
            user     = "ec2-user"
            private_key = file("${var.key_name}.pem")
            host = aws_instance.server.public_ip
      }
    
    provisioner "remote-exec" {
        
        inline = [
            "sudo mkfs.ext4  /dev/xvdh",
                        "sudo mount  /dev/xvdh  /var/www/html",
                        "sudo rm -rf /var/www/html/*",
                        "sudo git clone https://github.com/*****/***.git /var/www/html/"
        
        ]
    }
}
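One caveat: mkfs.ext4 reformats the volume, so if this null_resource is ever tainted and recreated, the cloned web content is wiped. A hedged refinement (assuming the blkid utility exists on the AMI, as it does on Amazon Linux 2) is to format only when the device has no filesystem yet, by replacing the first inline command with:

    "sudo blkid /dev/xvdh || sudo mkfs.ext4 /dev/xvdh",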

resource "aws_s3_bucket" "picture1997-bucket" {
        depends_on = [
        null_resource.attach-pd,
    ]
    
    bucket = var.bucket_name
    acl = "public-read"
    
    provisioner "local-exec" {
    
           command = "git clone https://github.com/attri97/server-picture.git server-picture10001"
    }
    
    provisioner "local-exec" {
    
        when = destroy
        command = "rmdir -rf server-picture10001"
    }    
}

resource "aws_s3_bucket_object" "picture-upload" {
        key = var.object_name
        bucket = aws_s3_bucket.picture1997-bucket.bucket
        acl    = "public-read"
        source = "server-picture10001/chanakya.jpg"
}
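Both the bucket and the uploaded object carry the public-read ACL. Because the CloudFront origin below is configured without an origin access identity, the object must be publicly readable for the distribution to fetch and serve it.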

locals {
    s3_origin_id = "S3-${aws_s3_bucket.picture1997-bucket.bucket}"
}

resource "aws_cloudfront_distribution" "cloudfront" {
    enabled = true
    is_ipv6_enabled = true
    
    origin {
        domain_name = aws_s3_bucket.picture1997-bucket.bucket_domain_name
        origin_id = local.s3_origin_id
    }
    
    default_cache_behavior {
            allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
            cached_methods   = ["GET", "HEAD"]
            target_origin_id = local.s3_origin_id

            forwarded_values {
                  query_string = false

                  cookies {
                    forward = "none"
                  }
            }
            
            viewer_protocol_policy = "allow-all"
        }
        
        restrictions {
            geo_restriction {
                restriction_type = "none"
            }
        }
        
        viewer_certificate {
    
            cloudfront_default_certificate = true
      }
      
      connection {
            type     = "ssh"
            user     = "ec2-user"
            private_key = file("${var.key_name}.pem")
            host = aws_instance.server.public_ip
      }
      
      provisioner "remote-exec" {
          
          inline = [
              
                        "sudo systemctl restart httpd",
              "sudo su << EOF",
                        "echo \"<img src='http://${self.domain_name}/${aws_s3_bucket_object.picture-upload.key}' width='300' height='600'>\" >> /var/www/html/guru.html",
                        "sudo systemctl restart httpd",
                    "EOF",    
          ]
      }
}
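CloudFront assigns each distribution a random domain name, so it is handy to print it after apply. A small optional addition (not in the original configuration) next to the output below:

    output "CloudFront-Domain-Name" {
        value = aws_cloudfront_distribution.cloudfront.domain_name
    }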

output "Instance-Public-IP" {
    value = aws_instance.server.public_ip
}
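With everything saved in a single file (main.tf, say), the standard workflow applies: terraform init downloads the AWS, TLS, local, and null provider plugins, terraform apply builds the whole stack in one go, and terraform destroy tears it down again, with the destroy-time provisioner cleaning up the cloned picture directory.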

 

About Author

Ankit Gupta

Ankit is a Red Hat Certified System Administrator and Red Hat Certified Engineer. He is interested in learning new DevOps tools and likes Linux, DevOps, automation, and cloud computing. He always tries to complete assigned tasks within the given time.
