Commit 773cb3a4 authored by David Pestana

add module directly imported by another

# AWS auth ConfigMap
resource "kubernetes_config_map" "aws_auth" {
  depends_on = [
    aws_eks_cluster.cluster,
    aws_security_group.cluster,
    null_resource.wait_for_cluster
  ]

  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
    labels = merge(
      {
        "app.kubernetes.io/managed-by" = "Terraform"
      },
      var.aws_auth_additional_labels
    )
  }

  data = {
    mapRoles = yamlencode(
      distinct(concat(
        [{
          rolearn  = aws_iam_role.node.arn
          username = "system:node:{{EC2PrivateDNSName}}"
          groups = [
            "system:bootstrappers",
            "system:nodes"
          ]
        }],
        var.map_roles,
      ))
    )
    mapUsers    = yamlencode(var.map_users)
    mapAccounts = yamlencode(var.map_accounts)
  }
}
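# For illustration only (not part of the module): with the defaults above and an empty
# var.map_roles, yamlencode renders the mapRoles key roughly as the YAML below; the
# account ID and role name are placeholders.
#
#   - "groups":
#     - "system:bootstrappers"
#     - "system:nodes"
#     "rolearn": "arn:aws:iam::111122223333:role/example-cluster-node"
#     "username": "system:node:{{EC2PrivateDNSName}}"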
# EKS module
# IAM role for the EKS cluster
resource "aws_iam_role" "cluster" {
  name = var.cluster_name

  assume_role_policy = jsonencode({
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "eks.amazonaws.com"
      }
    }]
    Version = "2012-10-17"
  })
}
# attach the required AWS managed policies to the cluster role
resource "aws_iam_role_policy_attachment" "cluster_EKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.cluster.name
}

resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceController" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
  role       = aws_iam_role.cluster.name
}
resource "aws_security_group" "cluster" {
name = var.cluster_name
description = "Allow egress to all"
vpc_id = var.vpc_id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group_rule" "cluster_self" {
description = "Allow communication within the cluster"
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.cluster.id
source_security_group_id = aws_security_group.cluster.id
to_port = 0
type = "ingress"
}
# EKS control plane
resource "aws_eks_cluster" "cluster" {
  name     = var.cluster_name
  version  = var.k8s_version
  role_arn = aws_iam_role.cluster.arn

  vpc_config {
    security_group_ids = [aws_security_group.cluster.id]
    subnet_ids         = var.cluster_subnet_ids
  }

  enabled_cluster_log_types = var.logging

  depends_on = [
    aws_iam_role_policy_attachment.cluster_EKSClusterPolicy,
    aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceController,
  ]
}
resource "aws_cloudwatch_log_group" "logs" {
count = length(var.logging) == 0 ? 0 : 1
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.logging_retention_in_days
}
# enable IAM roles for service accounts (IRSA)
data "tls_certificate" "cluster" {
  url = aws_eks_cluster.cluster.identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "cluster" {
  client_id_list  = ["sts.amazonaws.com"]
  thumbprint_list = [data.tls_certificate.cluster.certificates[0].sha1_fingerprint]
  url             = aws_eks_cluster.cluster.identity[0].oidc[0].issuer
}
data "aws_iam_policy_document" "sa_assume_role_policy" {
statement {
actions = ["sts:AssumeRoleWithWebIdentity"]
effect = "Allow"
condition {
test = "StringEquals"
variable = "${replace(aws_iam_openid_connect_provider.cluster.url, "https://", "")}:sub"
values = ["system:serviceaccount:kube-system:aws-node"]
}
principals {
identifiers = [aws_iam_openid_connect_provider.cluster.arn]
type = "Federated"
}
}
}
resource "aws_iam_role" "sa" {
assume_role_policy = data.aws_iam_policy_document.sa_assume_role_policy.json
name = "${var.cluster_name}-sa"
}
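# For illustration only: a workload assumes aws_iam_role.sa through the usual IRSA
# mechanism, i.e. the Kubernetes service account is annotated with the role ARN
# (the account ID below is a placeholder). The trust policy above restricts this to
# the kube-system/aws-node service account, and this module does not create or
# annotate the service account itself.
#
#   metadata:
#     annotations:
#       eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/example-cluster-sa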
resource "null_resource" "wait_for_cluster" {
depends_on = [
aws_eks_cluster.cluster,
aws_security_group_rule.cluster_self,
]
provisioner "local-exec" {
command = "for i in `seq 1 60`; do if `command -v wget > /dev/null`; then wget --no-check-certificate -O - -q $ENDPOINT/healthz >/dev/null && exit 0 || true; else curl -k -s $ENDPOINT/healthz >/dev/null && exit 0 || true;fi; sleep 5; done; echo TIMEOUT && exit 1"
interpreter = ["/bin/sh", "-c"]
environment = {
ENDPOINT = aws_eks_cluster.cluster.endpoint
}
}
}
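# The kubernetes_config_map.aws_auth resource above requires a configured kubernetes
# provider. A minimal sketch of that wiring, assuming it lives in the calling root
# module; the "eks" module name and the data source names are illustrative:
#
# data "aws_eks_cluster" "this" {
#   name = module.eks.cluster_name
# }
#
# data "aws_eks_cluster_auth" "this" {
#   name = module.eks.cluster_name
# }
#
# provider "kubernetes" {
#   host                   = data.aws_eks_cluster.this.endpoint
#   cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
#   token                  = data.aws_eks_cluster_auth.this.token
# }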
# EKS node pool setup
resource "aws_iam_role" "node" {
  name = "${var.cluster_name}-node"

  assume_role_policy = jsonencode({
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "ec2.amazonaws.com"
      }
    }]
    Version = "2012-10-17"
  })
}

resource "aws_iam_role_policy_attachment" "nodes_AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.node.name
}

resource "aws_iam_role_policy_attachment" "nodes_AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.node.name
}

resource "aws_iam_role_policy_attachment" "nodes_AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.node.name
}
# IAM policy for the cluster autoscaler, attached to the node role
resource "aws_iam_role_policy_attachment" "autoscaler" {
  role       = aws_iam_role.node.name
  policy_arn = aws_iam_policy.cluster_autoscaler.arn
}

resource "aws_iam_policy" "cluster_autoscaler" {
  name_prefix = "cluster-autoscaler"
  description = "EKS cluster-autoscaler policy for cluster ${var.cluster_name}"
  policy      = data.aws_iam_policy_document.cluster_autoscaler.json
}

data "aws_iam_policy_document" "cluster_autoscaler" {
  statement {
    sid    = "clusterAutoscalerAll"
    effect = "Allow"

    actions = [
      "autoscaling:DescribeAutoScalingGroups",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeLaunchConfigurations",
      "autoscaling:DescribeTags",
      "ec2:DescribeLaunchTemplateVersions",
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
      "autoscaling:UpdateAutoScalingGroup",
    ]

    resources = ["*"]
  }
}
locals {
  # apply defaults for any node pool fields the caller omits
  node_pools_expanded = { for k, v in var.node_pools : v["name"] => merge(
    {
      key_name           = ""
      instance_type      = "t3.medium"
      instance_count_max = 3
      instance_count_min = 1
      instance_disk_size = 20
      labels             = {}
      tags               = {}
    },
    v)
  }
}
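# For illustration only: an input item such as
#   { name = "workers", instance_type = "m5.large" }
# expands under the "workers" key to
#   { name = "workers", instance_type = "m5.large", key_name = "", instance_count_min = 1,
#     instance_count_max = 3, instance_disk_size = 20, labels = {}, tags = {} }
# because merge() lets caller-supplied fields override the defaults above.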
# create a managed node group per configured node pool
resource "aws_eks_node_group" "nodes" {
  for_each = local.node_pools_expanded

  cluster_name    = aws_eks_cluster.cluster.name
  version         = var.k8s_version
  node_role_arn   = aws_iam_role.node.arn
  subnet_ids      = var.nodes_subnet_ids
  node_group_name = each.value["name"]
  instance_types  = [each.value["instance_type"]]
  disk_size       = each.value["instance_disk_size"]
  labels          = each.value["labels"]

  scaling_config {
    desired_size = each.value["instance_count_min"]
    max_size     = each.value["instance_count_max"]
    min_size     = each.value["instance_count_min"]
  }

  # enable SSH access only when a key name is provided for the pool
  dynamic "remote_access" {
    for_each = each.value["key_name"] != "" ? [{
      ec2_ssh_key               = each.value["key_name"]
      source_security_group_ids = lookup(each.value, "source_security_group_ids", [])
    }] : []
    content {
      ec2_ssh_key               = remote_access.value["ec2_ssh_key"]
      source_security_group_ids = remote_access.value["source_security_group_ids"]
    }
  }

  # Ensure that IAM role permissions are created before and deleted after EKS node group handling.
  # Otherwise, EKS will not be able to properly delete EC2 instances and Elastic Network Interfaces.
  depends_on = [
    aws_iam_role_policy_attachment.nodes_AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.nodes_AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.nodes_AmazonEC2ContainerRegistryReadOnly,
    kubernetes_config_map.aws_auth
  ]

  lifecycle {
    ignore_changes = [scaling_config[0].desired_size]
  }

  tags = each.value["tags"]
}
output "nodes_iam_role_arn" {
description = "IAM role arn of the nodes"
value = aws_iam_role.node.arn
}
output "cluster_iam_role_arn" {
description = "IAM role arn of the nodes"
value = aws_iam_role.cluster.arn
}
output "cluster_sg_id" {
description = "Security group used within the cluster"
value = aws_security_group.cluster.id
}
output "cluster_name" {
description = "The name of the cluster"
value = aws_eks_cluster.cluster.id
depends_on = [null_resource.wait_for_cluster]
}
variable "cluster_name" {
description = "The name of the EKS cluster"
type = string
}
variable "node_pools" {
description = "List of node pools settings to be created. Each item in the list is the additional node pool"
# Accepted object in the list
# {
# name = string,
# instance_type = string,
# instance_disk_size = number,
# instance_count_min = number,
# instance_count_max = number,
# }
type = any
default = []
}
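# A fuller example value (illustrative only; key name, labels, and tags are placeholders):
# node_pools = [
#   { name = "default" },
#   {
#     name               = "large"
#     instance_type      = "m5.large"
#     instance_count_min = 2
#     instance_count_max = 6
#     instance_disk_size = 50
#     key_name           = "example-ssh-key"       # enables the remote_access block
#     labels             = { workload = "batch" }
#     tags               = { Team = "platform" }
#   },
# ]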
variable "k8s_version" {
description = "Kubernetes version to deploy"
type = string
}
variable "logging" {
description = "Which types of the control plane logging to be enabled on the cluster, see: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html"
type = list(string)
default = []
}
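# For illustration, a typical value might be:
#   logging = ["api", "audit", "authenticator"]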
variable "logging_retention_in_days" {
description = "Number of days to keep the logs in the Cloudwatch. Possible values are 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653, and 0 (to keep the logs forever)"
type = number
default = 7
}
variable "vpc_id" {
description = "The VPC is to use for the cluster"
type = string
}
variable "cluster_subnet_ids" {
description = "The list of subnets to use for the cluster"
type = list(string)
}
variable "nodes_subnet_ids" {
description = "The list of subnets to use for nodes"
type = list(string)
}
variable "aws_auth_additional_labels" {
description = "The lables which must be added to aws-auth configmap"
type = map(string)
default = {}
}
variable "map_roles" {
description = "Additional IAM roles to add to the aws-auth configmap"
type = list(object({
rolearn = string
username = string
groups = list(string)
}))
default = []
}
variable "map_users" {
description = "Additional IAM users to add to the aws-auth configmap"
type = list(object({
userarn = string
username = string
groups = list(string)
}))
default = []
}
variable "map_accounts" {
description = "Additional AWS account numbers to add to the aws-auth configmap"
type = list(string)
default = []
}
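# A minimal sketch of how this module might be consumed from a root module. The module
# path, cluster name, version, and VPC/subnet references are assumptions for
# illustration, not part of this commit:
#
# module "eks" {
#   source             = "./modules/eks"          # hypothetical path
#   cluster_name       = "example"
#   k8s_version        = "1.21"                   # example version
#   vpc_id             = aws_vpc.main.id          # assumed to exist in the caller
#   cluster_subnet_ids = aws_subnet.private[*].id # assumed to exist in the caller
#   nodes_subnet_ids   = aws_subnet.private[*].id
#   logging            = ["api", "audit"]
#   node_pools = [
#     { name = "default" },
#   ]
# }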