<Task One Complete>_Hybrid Multi Cloud Computing

<Task One Complete>_Hybrid Multi Cloud Computing

TASK 1:

Have to create/launch Application using Terraform
1. Create the key and security group which allow the port 80.
2. Launch EC2 instance.
3. In this Ec2 instance use the key and security group which we have created in step 1.
4. Launch one Volume (EBS) and mount that volume into /var/www/html
5. The developer has uploaded the code into a GitHub repo; the repo also contains some images.
6. Copy the github repo code into /var/www/html
7. Create S3 bucket, and copy/deploy the images from github repo into the s3 bucket and change the permission to public readable.
8. Create a CloudFront distribution using the S3 bucket (which contains the images) and use the CloudFront URL to update the code in /var/www/html
*Optional*
1) Those who are familiar with jenkins or are in devops AL have to integrate jenkins in this task wherever you feel can be integrated
2) create snapshot of ebs
Above task should be done using terraform

LETS_BEGIN__

First of all.........

What is Terraform ?

Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. It's an open-source infrastructure-as-code software tool created by HashiCorp. It enables users to define and provision datacenter infrastructure using a high-level configuration language known as HashiCorp Configuration Language (HCL), or optionally JSON. Terraform works with both public and private clouds, and also supports multi-cloud and hybrid-cloud architectures.

First of all, we have to install Terraform on our local machine.

Terraform download link

No alt text provided for this image

Terraform version :

No alt text provided for this image

Writing code in Notepad <task1.tf>

>> Declaring cloud provider

# AWS provider: deploy everything to the Mumbai region (ap-south-1)
# using credentials from the locally configured "sachin" CLI profile.
provider "aws" {
  region  = "ap-south-1"
  profile = "sachin"
}

>>creating key pair

# Generate a 2048-bit RSA key pair locally. The public half is registered
# with AWS as a key pair below; the private half is used for the SSH
# provisioner connections.
resource "tls_private_key" "task-2-key" {
  algorithm = "RSA"
  rsa_bits  = 2048
}



# Register the generated public key with EC2 so the instance below can be
# reached over SSH.
# Fix: removed a stray "v" after the closing brace — it broke the HCL parse.
resource "aws_key_pair" "private_key" {
  depends_on = [tls_private_key.task-2-key]
  key_name   = "mytask1key"
  public_key = tls_private_key.task-2-key.public_key_openssh
}

>>To Create Security Group

# Security group allowing inbound HTTP (80) and SSH (22) from anywhere,
# with unrestricted egress.
# Fix: the HTTP ingress rule used from_port = 0, which opened every TCP
# port from 0 through 80 to the world; it now permits port 80 only.
resource "aws_security_group" "allow_http" {
  depends_on  = [tls_private_key.task-2-key]
  name        = "allow_http"
  description = "Allows http and ssh"
  vpc_id      = "vpc-bac3ded2" # NOTE(review): hard-coded VPC id — confirm it exists in ap-south-1

  ingress {
    description = "HTTP allow"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "ssh"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "allow_HTTP"
  }
}


>>To Create EC2 instance

# EC2 web server, provisioned over SSH with Apache httpd, PHP and git so
# the GitHub repo can later be cloned into /var/www/html.
# Fixes:
#  - The connection block referenced aws_instance.instance_ec2.public_ip
#    from inside its own resource, which is a self-reference cycle; the
#    correct form inside a provisioner connection is `self.public_ip`.
#  - key_name and security_groups now reference the resources created
#    above instead of repeating their literal names, so Terraform orders
#    creation correctly and a rename cannot silently break the instance.
resource "aws_instance" "instance_ec2" {
  depends_on = [
    tls_private_key.task-2-key,
    aws_security_group.allow_http,
  ]

  ami             = "ami-0447a12f28fddb066" # presumably Amazon Linux 2 in ap-south-1 — TODO confirm
  instance_type   = "t2.micro"
  key_name        = aws_key_pair.private_key.key_name
  security_groups = [aws_security_group.allow_http.name]

  tags = {
    Name = "sac-ec2"
  }

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.task-2-key.private_key_pem
    host        = self.public_ip
  }

  # Install and enable the web stack as soon as the instance is reachable.
  provisioner "remote-exec" {
    inline = [
      "sudo yum install httpd php git -y",
      "sudo systemctl start httpd",
      "sudo systemctl enable httpd",
    ]
  }
}

>>Create EBS volume

# 1 GiB EBS volume created in the same availability zone as the web
# server so it can be attached to that instance below.
resource "aws_ebs_volume" "ebs_volume" {
  depends_on        = [aws_instance.instance_ec2]
  availability_zone = aws_instance.instance_ec2.availability_zone
  size              = 1

  tags = {
    Name = "ebs_volume1"
  }
}

>>Create a S3 Bucket

# Public S3 bucket holding the site images served through CloudFront.
# Fix: the bucket policy granted "s3:*" to Principal "*", letting anyone
# upload, overwrite, or delete objects. Public access is now read-only
# ("s3:GetObject"), which is all that serving images requires.
resource "aws_s3_bucket" "bucket1" {
  depends_on    = [aws_ebs_volume.ebs_volume]
  bucket        = "justlookingforsomeuniquenamewithoutspaces"
  force_destroy = true
  acl           = "public-read"

  policy = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "MYBUCKETPOLICY",
  "Statement": [
    {
      "Sid": "PublicReadGetObject",
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::justlookingforsomeuniquenamewithoutspaces/*"
    }
  ]
}
POLICY
}

>> To put object in S3 bucket

# Upload the image into the bucket and make it publicly readable.
# Fix: the original resource had no source/content, so it only created an
# empty object, and it lacked a public-read ACL even though the task
# requires the images to be publicly readable.
resource "aws_s3_bucket_object" "object" {
  depends_on = [aws_s3_bucket.bucket1]

  bucket = aws_s3_bucket.bucket1.id
  key    = "img1"
  source = "img1.jpg" # NOTE(review): local path of the image from the GitHub repo — adjust to the real file
  acl    = "public-read"
}
# Origin identifier reused by the CloudFront distribution.
# Fixes: `locals` blocks do not support depends_on (it is invalid HCL
# here), and s3_origin_id was the literal string
# "aws_s3_bucket.bucket1.id" rather than the bucket id — it is now a
# real reference, which also gives Terraform the implicit dependency.
locals {
  s3_origin_id = aws_s3_bucket.bucket1.id
}

>>To Attach EBS volume (Put "force_detach = true" if don't want error)

# Attach the EBS volume to the web server as /dev/sdh (the instance sees
# it as /dev/xvdh).
resource "aws_volume_attachment" "ebs_att" {
  depends_on = [
    aws_ebs_volume.ebs_volume,
    aws_instance.instance_ec2,
  ]

  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.ebs_volume.id
  instance_id = aws_instance.instance_ec2.id

  # Without force_detach, `terraform destroy` fails while the volume is mounted.
  force_detach = true
}

>>Null Resource to do some Configuration(mount that volume into /var/www/html)

# Post-attachment configuration: format the new volume, mount it at the
# Apache document root, and deploy the site's index.html.
# Fixes:
#  - "sudo https://... > index.html" was not a command (no curl/wget), so
#    the download step could never run; it now uses curl.
#  - "systemctl restart sshd" ran without sudo; on Amazon Linux that
#    fails, and remote-exec aborts on the first non-zero exit.
resource "null_resource" "null1" {
  depends_on = [aws_volume_attachment.ebs_att]

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = tls_private_key.task-2-key.private_key_pem
    host        = aws_instance.instance_ec2.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      "sudo systemctl restart sshd",
      "sudo mkfs.ext4 /dev/xvdh", # NOTE(review): reformats on every run — wipes existing data on re-apply
      "sudo mount /dev/xvdh /var/www/html",
      "sudo curl -o index.html https://sachinjangid2k.github.io/terra-image/ttt.html",
      "sudo cp index.html /var/www/html/",
    ]
  }
}

>>Cloud front attached to S3 bucket

# CloudFront distribution in front of the image bucket.
# Fixes:
#  - The resource block was never closed (missing final brace).
#  - The required `restrictions` and `viewer_certificate` blocks were
#    missing, so `terraform validate` failed.
resource "aws_cloudfront_distribution" "s3_distribution" {
  origin {
    domain_name = aws_s3_bucket.bucket1.bucket_regional_domain_name
    origin_id   = "s3-bucket1-bucket"

    s3_origin_config {
      # NOTE(review): placeholder OAI — create an
      # aws_cloudfront_origin_access_identity resource and reference it here.
      origin_access_identity = "origin-access-identity/cloudfront/ABCDEFG1234567"
    }
  }

  enabled             = true
  is_ipv6_enabled     = true
  comment             = "Some comment"
  default_root_object = "index.html"

  logging_config {
    include_cookies = false
    bucket          = "mylogs.s3.amazonaws.com" # NOTE(review): this log bucket must already exist
    prefix          = "myprefix"
  }

  # NOTE(review): custom aliases require a matching ACM certificate; remove
  # them or point viewer_certificate at an acm_certificate_arn you own.
  aliases = ["mysite.example.com", "yoursite.example.com"]

  default_cache_behavior {
    allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = "s3-bucket1-bucket"

    forwarded_values {
      query_string = false

      cookies {
        forward = "none"
      }
    }

    viewer_protocol_policy = "allow-all"
    min_ttl                = 0
    default_ttl            = 3600
    max_ttl                = 86400
  }

  # Required block: serve to all geographies.
  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  # Required block: use the default *.cloudfront.net certificate.
  viewer_certificate {
    cloudfront_default_certificate = true
  }
}

These are the steps for creating the automation. Anyone can create the whole environment in one click and destroy it in one click — but note that EBS detachment will fail on destroy unless "force_detach = true" is set in the volume attachment, so double-check that you have not missed it.

YOU CAN GET THIS CODE THROUGH BELOW LINK :

That's_it_guys.....

Thanks for going through it........ ;D

To view or add a comment, sign in

Others also viewed

Explore content categories