如何使用 Terraform 在现有 VNET 中创建私有 AKS 集群

pnk*_*469 2 azure kubernetes terraform terraform-provider-azure azure-aks

我正在尝试使用 Terraform 配置私有 AKS 集群。我想将我的私有 AKS 集群连接到此前使用 Azure 门户创建的现有 VNET。

Azure 门户中提供了虚拟网络选项。请找到下面的图片。

在此输入图像描述

但是，关于如何实现这一目标，azurerm_kubernetes_cluster 的 Terraform 文档提供的信息非常有限。

请在下面查看我的 main.tf：

# Private AKS cluster (the asker's original attempt).
# NOTE(review): as written this will not plan cleanly:
#   - `dns_prefix` is required by the provider but missing.
#   - an `identity` or `service_principal` block is required but missing.
#   - `kube_dashboard` is not a valid top-level block; in azurerm 2.x it lives
#     under `addon_profile { kube_dashboard { ... } }` and was removed in 3.x.
#   - to place nodes in an existing VNET (the goal of the question), set
#     `vnet_subnet_id` inside `default_node_pool` to the resource ID of a
#     subnet in that VNET, e.g. obtained via a `data "azurerm_subnet"` block.
resource "azurerm_kubernetes_cluster" "kubernetes_cluster" {
  name                    = var.cluster_name
  location                = var.location        # must match the existing VNET's region (see prose below)
  resource_group_name     = var.resource_group_name
  private_cluster_enabled = true                # API server gets a private endpoint only

  default_node_pool {
    name           = "default"
    node_count     = var.node_count
    vm_size        = var.vm_size
    max_pods       = var.max_pods_count
    # missing: vnet_subnet_id = <subnet resource ID in the existing VNET>
  }

  kube_dashboard {
    enabled = true
  }

  network_profile {
    network_plugin = "azure"   # Azure CNI — required for direct VNET integration
  }
}
Run Code Online (Sandbox Code Playgroud)

请注意,VNET 和要创建的集群共享相同的位置和资源组。

任何有关如何使用 Terraform 将私有 AKS 集群配置到现有 VNET 的帮助将不胜感激。

Ans*_*-MT 5

我使用了来自 GitHub 的现有代码并做了一些修改：由于 VNET 已经存在，我改用数据块（data block）而不是资源块来获取现有 VNET 的详细信息；另外我没有使用默认子网，而是分别为 AKS 和防火墙各创建了一个子网。

# Pin the Terraform CLI and azurerm provider versions this configuration
# was written against.
# NOTE(review): ">=2.50.0" has no upper bound, so a future major provider
# release (3.x/4.x) with breaking changes can be picked up silently;
# consider a pessimistic constraint such as "~> 2.50" instead.
terraform {
  required_version = ">= 0.14"
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">=2.50.0"
    }
  }
}

# Azure Resource Manager provider; the empty features block is mandatory.
provider "azurerm" {
  features {}
}

#local vars

# Shared values used throughout this configuration.
locals {
  environment             = "test"
  resource_group          = "AKS-test"
  resource_group_location = "East US"
  name_prefix             = "private-aks"
  aks_node_prefix         = ["10.3.1.0/24"] # subnet carved out for AKS nodes
  firewall_prefix         = ["10.3.2.0/24"] # subnet carved out for Azure Firewall
}

#Existing vnet with address space "10.3.0.0/16"
# Existing VNET (address space 10.3.0.0/16) created outside Terraform via the
# Azure portal; read through a data source instead of a resource block so
# Terraform does not try to create or manage it.
data "azurerm_virtual_network" "base" {
  name                = "existing-vnet"
  resource_group_name = "AKS-test"
}

#subnets

# Subnet that will host the AKS node pool inside the existing VNET.
resource "azurerm_subnet" "aks" {
  name                 = "snet-${local.name_prefix}-${local.environment}"
  resource_group_name  = local.resource_group
  virtual_network_name = data.azurerm_virtual_network.base.name
  address_prefixes     = local.aks_node_prefix
}

# Subnet for the Azure Firewall; the name "AzureFirewallSubnet" is mandated
# by the Azure platform and cannot be changed.
resource "azurerm_subnet" "firewall" {
  name                 = "AzureFirewallSubnet"
  resource_group_name  = local.resource_group
  virtual_network_name = data.azurerm_virtual_network.base.name
  address_prefixes     = local.firewall_prefix
}

#user assigned identity

# User-assigned managed identity for the AKS control plane; it is granted
# network permissions (role assignment below) so the cluster can join the
# pre-existing subnet and program the egress route table.
resource "azurerm_user_assigned_identity" "base" {
  resource_group_name = local.resource_group
  location            = local.resource_group_location
  name                = "mi-${local.name_prefix}-${local.environment}"
}

#role assignment

# Resolve the resource group through a data source so its full ARM ID does
# not have to be hand-written.
data "azurerm_resource_group" "base" {
  name = local.resource_group
}

# Grant the cluster identity Network Contributor on the resource group so it
# can join the existing subnet and manage the route table used for egress.
# Deriving `scope` from the data source removes the hard-coded
# "/subscriptions/xxxx.../resourceGroups/AKS-test" placeholder string, which
# would fail as-is and breaks when the config is reused in another
# subscription.
resource "azurerm_role_assignment" "base" {
  scope                = data.azurerm_resource_group.base.id
  role_definition_name = "Network Contributor"
  principal_id         = azurerm_user_assigned_identity.base.principal_id
}

#route table

# Route table for the AKS subnet; carries the default route (defined below)
# that forces node egress through the firewall, as required by the cluster's
# userDefinedRouting outbound type.
resource "azurerm_route_table" "base" {
  name                = "rt-${local.name_prefix}-${local.environment}"
  location            = data.azurerm_virtual_network.base.location
  resource_group_name = local.resource_group
}

#route 

# Default route: send all outbound traffic (0.0.0.0/0) to the firewall's
# private IP, which acts as the "VirtualAppliance" next hop.
resource "azurerm_route" "base" {
  name                   = "dg-${local.environment}"
  resource_group_name    = local.resource_group
  route_table_name       = azurerm_route_table.base.name
  address_prefix         = "0.0.0.0/0"
  next_hop_type          = "VirtualAppliance"
  next_hop_in_ip_address = azurerm_firewall.base.ip_configuration.0.private_ip_address
}

#route table association

# Attach the route table to the AKS node subnet so the default route above
# actually applies to node traffic.
resource "azurerm_subnet_route_table_association" "base" {
  subnet_id      = azurerm_subnet.aks.id
  route_table_id = azurerm_route_table.base.id
}

#firewall

# Public IP for the firewall; Azure Firewall requires a Static allocation on
# the Standard SKU.
resource "azurerm_public_ip" "base" {
  name                = "pip-firewall"
  location            = data.azurerm_virtual_network.base.location
  resource_group_name = local.resource_group
  allocation_method   = "Static"
  sku                 = "Standard"
}

# Azure Firewall that all AKS egress is routed through.
# `sku_name`/`sku_tier` are optional in azurerm 2.x (defaulting to
# AZFW_VNet/Standard) but are REQUIRED from provider 3.0 onward; because the
# provider version constraint in this configuration is open-ended
# (">=2.50.0"), set them explicitly so the plan keeps working on newer
# providers without changing the deployed resource.
resource "azurerm_firewall" "base" {
  name                = "fw-${local.name_prefix}-${local.environment}"
  location            = data.azurerm_virtual_network.base.location
  resource_group_name = local.resource_group
  sku_name            = "AZFW_VNet"
  sku_tier            = "Standard"

  ip_configuration {
    name                 = "ip-${local.name_prefix}-${local.environment}"
    subnet_id            = azurerm_subnet.firewall.id
    public_ip_address_id = azurerm_public_ip.base.id
  }
}

#kubernetes_cluster

# Private AKS cluster placed into the pre-existing VNET via
# `default_node_pool.vnet_subnet_id` — this is the key setting the question
# asks about.
resource "azurerm_kubernetes_cluster" "base" {
  name                    = "${local.name_prefix}-${local.environment}"
  location                = local.resource_group_location
  resource_group_name     = local.resource_group
  dns_prefix              = "dns-${local.name_prefix}-${local.environment}"
  private_cluster_enabled = true # API server exposed through a private endpoint only

  network_profile {
    network_plugin = "azure"              # Azure CNI: pods get IPs from the VNET
    outbound_type  = "userDefinedRouting" # egress goes via the firewall route table
  }

  default_node_pool {
    name           = "default"
    node_count     = 1
    vm_size        = "Standard_D2_v2"
    vnet_subnet_id = azurerm_subnet.aks.id # attach nodes to the existing VNET's subnet
  }

  identity {
    type = "UserAssigned"
    # `user_assigned_identity_id` was deprecated in 2.x and removed in
    # provider 3.0; `identity_ids` is the supported argument, and the
    # open-ended ">=2.50.0" constraint will pull providers where only the
    # list form is valid.
    identity_ids = [azurerm_user_assigned_identity.base.id]
  }

  # Both must exist before cluster creation: userDefinedRouting egress fails
  # without the default route, and subnet join fails without the Network
  # Contributor role assignment.
  depends_on = [
    azurerm_route.base,
    azurerm_role_assignment.base
  ]
}
Run Code Online (Sandbox Code Playgroud)

参考: Github

测试前:

在此输入图像描述

对上述代码进行 terraform Plan:

在此输入图像描述

应用代码后:

在此输入图像描述

部署后:

在此输入图像描述

  • 感谢您的回答。这确实对我有帮助。在“default_node_pool”块中,我执行了以下操作:“vnet_subnet_id = data.azurerm.mysubnetname.id”此外,我在“main.tf”中使用现有子网作为数据源 执行此操作后,我的私有集群已连接到所需的 VNET。我希望文档说应将子网的“资源 id”提供给“vnet_subnet_id”参数。再次感谢大家:) (3认同)