Automated Infrastructure in AWS
TASK-1
1. Create the key and security group which allows the port 80.
2. Launch EC2 instance.
3. In this Ec2 instance use the key and security group which we have created in step 1.
4. Launch one Volume (EBS) and mount that volume into /var/www/html
5. A developer has uploaded the code into a GitHub repo; the repo also contains some images.
6. Copy the GitHub repo code into /var/www/html
7. Create an S3 bucket, and copy/deploy the images from Github repo into the s3 bucket and change the permission to public readable.
8. Create a CloudFront distribution using the S3 bucket (which contains the images) and use the CloudFront URL to update the code in /var/www/html.
HERE IS MY CODE
# AWS provider configuration.
# Credentials are read from the local AWS CLI profile "task6";
# all resources are created in the Mumbai (ap-south-1) region.
provider "aws" {
  profile = "task6"
  region  = "ap-south-1"
}
# Security group for the web server.
# Opens SSH (22) for provisioning and HTTP (80) for the site, from anywhere.
# All outbound traffic is permitted.
resource "aws_security_group" "allow" {
  name        = "allow_traffic"
  description = "Allow SSH and HTTP inbound traffic"

  ingress {
    description = "allow ssh"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    # Port 80 is plain HTTP (the original comment said "https" — that
    # would be port 443; the task only requires port 80).
    description = "allow http"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1" # all protocols
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "myinstanceSQ"
  }
}
# Launching the EC2 instance.
# Uses the key pair "task6" (created beforehand) and the security group
# defined above. The remote-exec provisioner installs Apache, PHP and git.
resource "aws_instance" "web" {
  ami           = "ami-0ebc1ac48dfd14136" # Amazon Linux 2, ap-south-1
  instance_type = "t2.micro"
  # Reference the SG resource instead of hard-coding its name so Terraform
  # creates the security group before the instance.
  security_groups = [aws_security_group.allow.name]
  key_name        = "task6"

  tags = {
    Name = "MyEc2Instance"
  }

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = file("C:/Users/vinee/Downloads/task6.pem")
    host        = self.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      "sudo yum install httpd php git -y",
      "sudo systemctl restart httpd",
      "sudo systemctl enable httpd",
    ]
  }
}
# Launching the EBS volume.
# Must be created in the same availability zone as the instance,
# otherwise it cannot be attached.
resource "aws_ebs_volume" "ebsvol" {
  availability_zone = aws_instance.web.availability_zone
  size              = 1 # GiB

  tags = {
    Name = "PrmntVolume"
  }
}
# It's time to attach the volume.
# After attachment, the remote-exec provisioner formats the volume,
# mounts it on /var/www/html and clones the web code into it.
resource "aws_volume_attachment" "ebs_att" {
  depends_on   = [aws_ebs_volume.ebsvol]
  device_name  = "/dev/sdf" # exposed inside the instance as /dev/xvdf
  volume_id    = aws_ebs_volume.ebsvol.id
  instance_id  = aws_instance.web.id
  force_detach = true

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = file("C:/Users/vinee/Downloads/task6.pem")
    host        = aws_instance.web.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      # NOTE: mkfs.ext4 destroys any existing data on the volume — this is
      # intentional here because the volume is freshly created.
      "sudo mkfs.ext4 /dev/xvdf",
      "sudo mount /dev/xvdf /var/www/html",
      "sudo rm -rf /var/www/html/*",
      "sudo git clone https://github.com/GitNegi/muticloud.git /var/www/html/",
    ]
  }
}
# Creating the S3 bucket that will hold the images.
# The bucket is public-read so CloudFront (and browsers) can fetch objects.
resource "aws_s3_bucket" "bbbcde" {
  bucket = "vineetvtx" # bucket names are globally unique
  acl    = "public-read"
  // region is taken from the provider (ap-south-1)

  versioning {
    enabled = true
  }

  tags = {
    Name = "My bucket"
  }
}
# Upload the image into the bucket with public-read permission
# so it can be served through CloudFront.
resource "aws_s3_bucket_object" "object" {
  depends_on = [aws_s3_bucket.bbbcde]
  bucket     = aws_s3_bucket.bbbcde.bucket
  key        = "images.png"
  source     = "C:/Users/vinee/Desktop/images.png"
  acl        = "public-read"
}
# Prefix used to build the CloudFront origin id ("S3-<bucket-id>").
variable "oid" {
  type    = string
  default = "S3-"
}
# Full origin id for the CloudFront distribution, e.g. "S3-vineetvtx".
locals {
  s3_origin_id = "${var.oid}${aws_s3_bucket.bbbcde.id}"
}
# CloudFront distribution in front of the S3 bucket. Once it is ready,
# the provisioner appends an <img> tag (pointing at the CloudFront URL)
# to the site's index.php on the web server.
resource "aws_cloudfront_distribution" "s3_distribution" {
  # Also depend on the volume attachment: its provisioner runs
  # "rm -rf /var/www/html/*" and clones the repo, so the append below must
  # happen strictly after that, or the change could be wiped out.
  depends_on = [
    aws_s3_bucket_object.object,
    aws_volume_attachment.ebs_att,
  ]

  origin {
    domain_name = aws_s3_bucket.bbbcde.bucket_regional_domain_name
    origin_id   = local.s3_origin_id
  }

  enabled = true

  default_cache_behavior {
    allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = local.s3_origin_id

    forwarded_values {
      query_string = false
      cookies {
        forward = "none"
      }
    }

    viewer_protocol_policy = "allow-all"
    min_ttl                = 0
    default_ttl            = 3600
    max_ttl                = 86400
  }

  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  viewer_certificate {
    cloudfront_default_certificate = true
  }

  connection {
    type        = "ssh"
    user        = "ec2-user"
    private_key = file("C:/Users/vinee/Downloads/task6.pem")
    host        = aws_instance.web.public_ip
  }

  provisioner "remote-exec" {
    inline = [
      # A resource cannot reference its own attributes by full name
      # (that is a dependency cycle) — inside a provisioner use "self".
      # "sudo tee -a" replaces the fragile "sudo su <<END" heredoc trick.
      "echo \"<img src='http://${self.domain_name}/${aws_s3_bucket_object.object.key}' height='250' width='255'>\" | sudo tee -a /var/www/html/index.php",
    ]
  }
}
# Final step: once everything is deployed, open the site in Chrome
# on the local (Windows) workstation — "start" is a Windows shell command.
resource "null_resource" "null2" {
  depends_on = [
    aws_cloudfront_distribution.s3_distribution,
    aws_volume_attachment.ebs_att,
  ]

  provisioner "local-exec" {
    command = "start chrome http://${aws_instance.web.public_ip}"
  }
}
terraform apply --auto-approve
Result of above code
DESTROY AFTER THE USE
terraform destroy --auto-approve