Task 1 — "Hybrid multi-cloud using Terraform"
Task description:
1. Create the key and security group which allow the port 80.
2. Launch EC2 instance.
3. In this Ec2 instance use the key and security group which we have created in step 1.
4. Launch one Volume (EBS) and mount that volume into /var/www/html
5. The developer has uploaded the code to a GitHub repo; the repo also contains some images.
6. Copy the github repo code into /var/www/html
7. Create S3 bucket, and copy/deploy the images from github repo into the s3 bucket and change the permission to public readable.
8. Create a CloudFront distribution using the S3 bucket (which contains the images) and use the CloudFront URL to update the code in /var/www/html.
Job :-1
Code for creating key_pairs
In this code I have created one key pair named "cloudkey".
# AWS provider configuration: all resources below are created in the
# Mumbai region using the locally configured "awsprofile" credentials.
provider "aws" {
region = "ap-south-1"
profile = "awsprofile"
}
# Generate an RSA key pair locally. The public half is registered with AWS
# (aws_key_pair.keypair below) and the private half is fed to the SSH
# provisioners. rsa_bits is pinned explicitly rather than relying on the
# provider default (2048) — 4096 for an SSH key used over the internet.
resource "tls_private_key" "cloudkey" {
algorithm = "RSA"
rsa_bits = 4096
}
# Register the generated public key with AWS under the name "cloudkey".
# The reference to tls_private_key.cloudkey.public_key_openssh already
# creates an implicit dependency, so the explicit depends_on block the
# original carried was redundant and has been removed.
resource "aws_key_pair" "keypair" {
key_name = "cloudkey"
public_key = tls_private_key.cloudkey.public_key_openssh
}
# NOTE(review): this variable is never referenced anywhere in this file —
# the key name is hard-coded in aws_key_pair.keypair above. Kept (removal
# could break an external -var usage), but documented.
variable "new_key_for_cloudtask1" {
type = string
default = "cloudkey"
description = "Name of the EC2 key pair created for this task (currently unused by the resources in this file)."
}
Job :- 2
Creating Security Group :-
In this code I have created one security group named "security_3". This security group allows port 80 for HTTP and port 22 for SSH.
# Security group for the web server instance:
#  - inbound 22/tcp (SSH, needed by the remote-exec provisioners)
#  - inbound 80/tcp (HTTP, to serve the deployed site)
#  - all outbound traffic (needed for yum/git inside the instance)
# NOTE(review): both ingress rules are open to 0.0.0.0/0; fine for a demo,
# but SSH should be restricted to a known CIDR in real use.
resource "aws_security_group" "security_3" {
name = "security_3"
description = "Allow traffic"
ingress {
description = "SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1" # -1 means all protocols
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "security_3"
}
}
Job :- 3
Launching EC2 Instances :-
In this code I have launched an EC2 instance named "sikki1" using the key pair "cloudkey" and the security group "security_3", and I also installed the httpd server, PHP, and git on this instance.
# Web-server instance. Fixes over the original:
#  - security_groups now references the resource instead of the literal
#    string "security_3", so Terraform knows to create the SG first.
#  - host uses self.public_ip; referencing aws_instance.myin1 from inside
#    its own provisioner is a self-referential cycle error in Terraform.
#  - agent is the boolean false, not the string "false".
resource "aws_instance" "myin1" {
ami = "ami-07a8c73a650069cf3" # assumed Amazon Linux 2 in ap-south-1 (ec2-user/yum) — TODO confirm
instance_type = "t2.micro"
key_name = aws_key_pair.keypair.key_name
security_groups = [aws_security_group.security_3.name]
tags = {
Name = "Sikki1"
}
# Install the web stack as soon as the instance is reachable over SSH.
provisioner "remote-exec"{
connection {
type = "ssh"
user = "ec2-user"
agent = false
private_key = tls_private_key.cloudkey.private_key_pem
host = self.public_ip
}
inline = [
"sudo yum install httpd php git -y",
"sudo systemctl restart httpd",
"sudo systemctl enable httpd",
]
}
}
Job :- 4
Code for creating EBS Volume:-
In this code I have launched an EBS volume named "ebs".
# 1 GiB EBS volume for /var/www/html, created in the same AZ as the
# instance (a volume can only attach to an instance in its own AZ).
# NOTE(review): the resource label "esb1" is a typo for "ebs1", but it is
# referenced by aws_volume_attachment.ebs_att below, so renaming it would
# break (and force-recreate) existing state — left as-is.
resource "aws_ebs_volume" "esb1" {
availability_zone = aws_instance.myin1.availability_zone
size = 1
tags = {
Name = "ebs"
}
}
Job :- 5
Code for attaching the EBS volume to the EC2 instance named "sikki1".
# Attach the volume to the instance. Requested as /dev/sdh; on Amazon
# Linux the kernel exposes it as /dev/xvdh, which is the name the
# remote-exec commands in null_resource.nullremote3 use.
# force_detach = true lets "terraform destroy" proceed even while the
# filesystem is mounted — convenient here, but it risks data loss.
resource "aws_volume_attachment" "ebs_att" {
device_name = "/dev/sdh"
volume_id = aws_ebs_volume.esb1.id
instance_id = aws_instance.myin1.id
force_detach = true
}
Job :- 6
Code for Creating a S3 bucket:-
In this code I have created an S3 bucket named "nikhilcloudtc1" and also uploaded one image to this bucket.
# Versioned, private S3 bucket that serves as the CloudFront origin for
# the site images. force_destroy lets "terraform destroy" delete the
# bucket even when it still holds objects.
# Fix: force_destroy is a boolean argument — use true, not the string "true".
resource "aws_s3_bucket" "bucket" {
bucket = "nikhilcloudtc1"
acl = "private"
force_destroy = true
versioning {
enabled = true
}
}
# Clone the image repo onto the machine running terraform, so the object
# upload below has a local source file.
# NOTE(review): "git clone" fails on a second apply if ./image3 already
# exists. A guard (e.g. "test -d image3 || git clone ...") would fix it,
# but the local shell is ambiguous here (the "start chrome" local-exec
# later suggests Windows cmd) — confirm the shell before adding one.
resource "null_resource" "local-1" {
depends_on = [
aws_s3_bucket.bucket,
]
provisioner "local-exec" {
command = "git clone https://github.com/nc960058/image3.git"
}
}
# Upload the cloned image to the bucket as a public-readable object.
# Fixes over the original:
#  - content_type is set; without it S3 stores binary/octet-stream and
#    browsers download the file instead of rendering it via CloudFront.
#  - the depends_on entry for the bucket was redundant (already implied
#    by the aws_s3_bucket.bucket.id reference); only the git-clone
#    null_resource needs an explicit dependency.
resource "aws_s3_bucket_object" "file_upload" {
depends_on = [
null_resource.local-1
]
bucket = aws_s3_bucket.bucket.id
key = "nikhil.jpg"
source = "image3/nikhil.jpg"
content_type = "image/jpeg"
acl = "public-read"
}
# Dumps the entire uploaded-object resource (bucket, key, etag, acl, ...)
# after apply; kept whole-object so the output value is unchanged.
output "Image" {
value = aws_s3_bucket_object.file_upload
}
Job :- 7
Creating a cloud front:-
# CloudFront distribution fronting the S3 bucket that holds the images.
# Fix: https_port must be 443 — the original set it to 80, which is the
# HTTP port; CloudFront's HTTPS connections to the origin use 443.
# NOTE(review): for a plain S3 REST origin, s3_origin_config is the usual
# choice; custom_origin_config is kept here to preserve the author's setup.
resource "aws_cloudfront_distribution" "distribution" {
depends_on = [
aws_s3_bucket.bucket ,
null_resource.local-1
]
origin {
domain_name = aws_s3_bucket.bucket.bucket_regional_domain_name
origin_id = "S3-nikhilcloudtc1-id"
custom_origin_config {
http_port = 80
https_port = 443
origin_protocol_policy = "match-viewer"
origin_ssl_protocols = ["TLSv1", "TLSv1.1", "TLSv1.2"]
}
}
enabled = true
default_cache_behavior {
allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
cached_methods = ["GET", "HEAD"]
target_origin_id = "S3-nikhilcloudtc1-id" # must match origin_id above
forwarded_values {
query_string = false
cookies {
forward = "none"
}
}
viewer_protocol_policy = "allow-all" # serve both HTTP and HTTPS to viewers
min_ttl = 0
default_ttl = 3600
max_ttl = 86400
}
restrictions {
geo_restriction {
restriction_type = "none" # no geographic blocking
}
}
viewer_certificate {
cloudfront_default_certificate = true # use *.cloudfront.net cert
}
}
# The CloudFront URL (xxxx.cloudfront.net) to embed in the site's code
# in /var/www/html for loading the images.
output "domain-name" {
value = aws_cloudfront_distribution.distribution.domain_name
}
Job :- 8
Uploading code and image from github repo:-
# Once the volume is attached: format it (first time only), mount it on
# the web root, and deploy the site code from GitHub.
# Fix: the original ran mkfs.ext4 unconditionally, which would wipe the
# volume any time the provisioner re-ran; it is now guarded so the device
# is only formatted when it does not already carry an ext4 filesystem.
resource "null_resource" "nullremote3" {
depends_on = [
aws_volume_attachment.ebs_att,
]
connection {
type = "ssh"
user = "ec2-user"
private_key = tls_private_key.cloudkey.private_key_pem
host = aws_instance.myin1.public_ip
}
provisioner "remote-exec" {
inline = [
"sudo file -s /dev/xvdh | grep -q ext4 || sudo mkfs.ext4 /dev/xvdh",
"sudo mount /dev/xvdh /var/www/html",
"sudo rm -rf /var/www/html/*",
"sudo git clone https://github.com/nc960058/final3.git /var/www/html/"
]
}
}
Job :- 9
Final code for showing the output:-
# After the site is deployed, open it in the local browser.
# NOTE(review): "start chrome" is Windows-specific (cmd built-in); on
# Linux/macOS this would need xdg-open / open instead.
resource "null_resource" "nulllocal1" {
depends_on = [
null_resource.nullremote3,
]
provisioner "local-exec" {
command = "start chrome ${aws_instance.myin1.public_ip}"
}
}
After this whole setup use the following commands:-
terraform init
terraform apply -auto-approve
Keep Going !!