Container Service for Kubernetes (ACK): Create an ACK managed cluster by using Terraform

Last updated: Nov 26, 2024

This topic describes how to use Terraform to create an ACK managed cluster.

Note

The sample code in this tutorial supports one-click execution. You can run the code directly.

Prerequisites

  • Container Service for Kubernetes (ACK) is activated. To activate ACK by using Terraform, see Activate ACK and assign roles by using Terraform.

  • An Alibaba Cloud account has full permissions on all of its resources, so leaked credentials pose a major risk. We recommend that you use a RAM user and create an AccessKey pair for that RAM user. For more information, see Create a RAM user and Create an AccessKey pair.

  • Attach the following minimum-permission policy to the RAM user that runs the Terraform commands, so that the user can manage the resources involved in this example. For more information, see Grant permissions to a RAM user.

    This policy allows the RAM user to create, view, and delete VPCs, vSwitches, and ACK clusters.

    {
      "Version": "1",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": [
            "vpc:CreateVpc",
            "vpc:CreateVSwitch",
            "cs:CreateCluster",
            "vpc:DescribeVpcAttribute",
            "vpc:DescribeVSwitchAttributes",
            "vpc:DescribeRouteTableList",
            "vpc:DescribeNatGateways",
            "cs:DescribeTaskInfo",
            "cs:DescribeClusterDetail",
            "cs:GetClusterCerts",
            "cs:CheckControlPlaneLogEnable",
            "cs:CreateClusterNodePool",
            "cs:DescribeClusterNodePoolDetail",
            "cs:ModifyClusterNodePool",
            "vpc:DeleteVpc",
            "vpc:DeleteVSwitch",
            "cs:DeleteCluster",
            "cs:DeleteClusterNodepool"
          ],
          "Resource": "*"
        }
      ]
    }
  • Prepare a Terraform runtime environment. You can use Terraform in either of the following ways:

    • Cloud Shell: Terraform is preinstalled in Alibaba Cloud Cloud Shell and the identity credentials are already configured, so you can run Terraform commands directly in Cloud Shell. This option suits quick, low-cost access to Terraform.

    • Install and configure Terraform locally: suitable for scenarios with poor network connectivity or a need for a custom development environment. A sketch of configuring credentials through environment variables follows this list.

    Important

    Make sure that your Terraform version is not earlier than v0.12.28. To check the current version, run the terraform --version command.
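
If you install Terraform locally, the alicloud provider can read the AccessKey pair of your RAM user from environment variables, so you do not need to hard-code credentials in the configuration file. The following is a minimal sketch with placeholder values:

    # Placeholder values; replace them with the AccessKey pair of your RAM user.
    export ALICLOUD_ACCESS_KEY="LTAI************"
    export ALICLOUD_SECRET_KEY="****************"
    export ALICLOUD_REGION="cn-shenzhen"

    # Confirm that the installed version is not earlier than v0.12.28.
    terraform --version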

Resources used

Note

Some of the resources in this example incur fees. Release or unsubscribe from the resources promptly when you no longer need them.

  • alicloud_vpc: the VPC for the cluster.

  • alicloud_vswitch: the vSwitches for worker nodes and pods.

  • alicloud_cs_managed_kubernetes: the ACK managed cluster.

  • alicloud_cs_kubernetes_node_pool: the regular, managed, and auto-scaling node pools.

Use Terraform to create an ACK managed cluster (Terway)

This example creates an ACK managed cluster with a regular node pool, a managed node pool, and an auto-scaling node pool, and installs a set of components in the cluster by default: Terway (network), csi-plugin (storage), csi-provisioner (storage), logtail-ds (logging), Nginx Ingress Controller, ack-arms-prometheus (monitoring), and ack-node-problem-detector (node diagnostics).
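
The main.tf file below declares only the provider block. Optionally, you can also pin the provider source and version in a terraform block. The following is a minimal sketch; the version constraint shown is only an example:

terraform {
  required_providers {
    alicloud = {
      source  = "aliyun/alicloud"
      # Example constraint only; use a version that matches your environment.
      version = ">= 1.212.0"
    }
  }
}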

  1. Create a working directory and, in it, a configuration file named main.tf, then copy the following code into main.tf. (The variable defaults in main.tf can be overridden at apply time; see the example after these steps.)

    provider "alicloud" {
      region = var.region_id
    }
    
    variable "region_id" {
      type    = string
      default = "cn-shenzhen"
    }
    
    variable "cluster_spec" {
      type        = string
      description = "The cluster specifications of kubernetes cluster,which can be empty. Valid values:ack.standard : Standard managed clusters; ack.pro.small : Professional managed clusters."
      default     = "ack.pro.small"
    }
    
    variable "ack_version" {
      type        = string
      description = "Desired Kubernetes version. "
      default     = "1.28.9-aliyun.1"
    }
    
    # The availability zones of the vSwitches.
    variable "availability_zone" {
      description = "The availability zones of vswitches."
      default     = ["cn-shenzhen-c", "cn-shenzhen-e", "cn-shenzhen-f"]
    }
    
    # The list of existing vSwitch IDs for worker nodes.
    variable "node_vswitch_ids" {
      description = "List of existing node vswitch ids for terway."
      type        = list(string)
      default     = []
    }
    
    # CIDR blocks used to create new vSwitches when node_vswitch_ids is not specified.
    variable "node_vswitch_cidrs" {
      description = "List of cidr blocks used to create several new vswitches when 'node_vswitch_ids' is not specified."
      type        = list(string)
      default     = ["172.16.0.0/23", "172.16.2.0/23", "172.16.4.0/23"]
    }
    
    # Existing pod vSwitch IDs for Terway. If empty, new vSwitches for Terway are created from terway_vswitch_cidrs.
    variable "terway_vswitch_ids" {
      description = "List of existing pod vswitch ids for terway."
      type        = list(string)
      default     = []
    }
    
    # CIDR blocks used to create the vSwitches for Terway when terway_vswitch_ids is not specified.
    variable "terway_vswitch_cidrs" {
      description = "List of cidr blocks used to create several new vswitches when 'terway_vswitch_ids' is not specified."
      type        = list(string)
      default     = ["172.16.208.0/20", "172.16.224.0/20", "172.16.240.0/20"]
    }
    
    # ECS instance types used to launch worker nodes.
    variable "worker_instance_types" {
      description = "The ecs instance types used to launch worker nodes."
      default     = ["ecs.g6.2xlarge", "ecs.g6.xlarge"]
    }
    
    # The password used to log on to the worker nodes.
    variable "password" {
      description = "The password of ECS instance."
      default     = "Test123456"
    }
    
    # Components installed in the ACK cluster: Terway (network), csi-plugin (storage), csi-provisioner (storage), logtail-ds (logging), Nginx Ingress Controller, ack-arms-prometheus (monitoring), and ack-node-problem-detector (node diagnostics).
    variable "cluster_addons" {
      type = list(object({
        name   = string
        config = string
      }))
    
      default = [
        {
          "name"   = "terway-eniip",
          "config" = "",
        },
        {
          "name"   = "logtail-ds",
          "config" = "{\"IngressDashboardEnabled\":\"true\"}",
        },
        {
          "name"   = "nginx-ingress-controller",
          "config" = "{\"IngressSlbNetworkType\":\"internet\"}",
        },
        {
          "name"   = "arms-prometheus",
          "config" = "",
        },
        {
          "name"   = "ack-node-problem-detector",
          "config" = "{\"sls_project_name\":\"\"}",
        },
        {
          "name"   = "csi-plugin",
          "config" = "",
        },
        {
          "name"   = "csi-provisioner",
          "config" = "",
        }
      ]
    }
    
    # Prefix used in the name of the ACK managed cluster.
    variable "k8s_name_prefix" {
      description = "The name prefix used to create managed kubernetes cluster."
      default     = "tf-ack-shenzhen"
    }
    
    # Default resource names.
    locals {
      k8s_name_terway         = substr(join("-", [var.k8s_name_prefix, "terway"]), 0, 63)
      k8s_name_flannel        = substr(join("-", [var.k8s_name_prefix, "flannel"]), 0, 63)
      k8s_name_ask            = substr(join("-", [var.k8s_name_prefix, "ask"]), 0, 63)
      new_vpc_name            = "tf-vpc-172-16"
      new_vsw_name_azD        = "tf-vswitch-azD-172-16-0"
      new_vsw_name_azE        = "tf-vswitch-azE-172-16-2"
      new_vsw_name_azF        = "tf-vswitch-azF-172-16-4"
      nodepool_name           = "default-nodepool"
      managed_nodepool_name   = "managed-node-pool"
      autoscale_nodepool_name = "autoscale-node-pool"
      log_project_name        = "log-for-${local.k8s_name_terway}"
    }
    
    # Node ECS instance configuration. Queries ECS instance types that meet the CPU and memory requirements.
    data "alicloud_instance_types" "default" {
      cpu_core_count       = 8
      memory_size          = 32
      availability_zone    = var.availability_zone[0]
      kubernetes_node_role = "Worker"
    }
    
    # VPC.
    resource "alicloud_vpc" "default" {
      vpc_name   = local.new_vpc_name
      cidr_block = "172.16.0.0/12"
    }
    
    # vSwitches for worker nodes.
    resource "alicloud_vswitch" "vswitches" {
      count      = length(var.node_vswitch_ids) > 0 ? 0 : length(var.node_vswitch_cidrs)
      vpc_id     = alicloud_vpc.default.id
      cidr_block = element(var.node_vswitch_cidrs, count.index)
      zone_id    = element(var.availability_zone, count.index)
    }
    
    # vSwitches for pods.
    resource "alicloud_vswitch" "terway_vswitches" {
      count      = length(var.terway_vswitch_ids) > 0 ? 0 : length(var.terway_vswitch_cidrs)
      vpc_id     = alicloud_vpc.default.id
      cidr_block = element(var.terway_vswitch_cidrs, count.index)
      zone_id    = element(var.availability_zone, count.index)
    }
    
    # ACK managed cluster.
    resource "alicloud_cs_managed_kubernetes" "default" {
      name                         = local.k8s_name_terway # The name of the cluster.
      cluster_spec                 = var.cluster_spec      # Create an ACK Pro cluster.
      version                      = var.ack_version
      worker_vswitch_ids           = split(",", join(",", alicloud_vswitch.vswitches.*.id))        # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
      pod_vswitch_ids              = split(",", join(",", alicloud_vswitch.terway_vswitches.*.id)) # vSwitches for pods.
      new_nat_gateway              = true                                                          # Whether to create a new NAT gateway when the cluster is created. Default: true.
      service_cidr                 = "10.11.0.0/16"                                                # The Service CIDR block. It cannot be the same as the VPC CIDR block or the CIDR blocks of existing clusters in the VPC, and it cannot be modified after the cluster is created.
      slb_internet_enabled         = true                                                          # Whether to create an Internet-facing SLB instance for the API server. Default: false.
      enable_rrsa                  = true
      control_plane_log_components = ["apiserver", "kcm", "scheduler", "ccm"] # Control plane log components.
    
      dynamic "addons" { # 组件管理。
        for_each = var.cluster_addons
        content {
          name   = addons.value.name
          config = addons.value.config
        }
      }
    }
    
    # Regular node pool.
    resource "alicloud_cs_kubernetes_node_pool" "default" {
      cluster_id            = alicloud_cs_managed_kubernetes.default.id              # The ID of the cluster.
      node_pool_name        = local.nodepool_name                                    # The name of the node pool.
      vswitch_ids           = split(",", join(",", alicloud_vswitch.vswitches.*.id)) # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
      instance_types        = var.worker_instance_types
      instance_charge_type  = "PostPaid"
      runtime_name          = "containerd"
      runtime_version       = "1.6.20"
      desired_size          = 2            # The expected number of nodes in the node pool.
      password              = var.password # The password used to log on to the nodes through SSH.
      install_cloud_monitor = true         # Whether to install CloudMonitor on the nodes.
      system_disk_category  = "cloud_efficiency"
      system_disk_size      = 100
      image_type            = "AliyunLinux"
    
      data_disks {              # Data disk configuration of the nodes.
        category = "cloud_essd" # Data disk category.
        size     = 120          # Data disk size.
      }
    }
    
    # Managed node pool.
    resource "alicloud_cs_kubernetes_node_pool" "managed_node_pool" {
      cluster_id     = alicloud_cs_managed_kubernetes.default.id              # The ID of the cluster.
      node_pool_name = local.managed_nodepool_name                            # The name of the node pool.
      vswitch_ids    = split(",", join(",", alicloud_vswitch.vswitches.*.id)) # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
      desired_size   = 0                                                      # The expected number of nodes in the node pool.
    
      management {
        auto_repair     = true
        auto_upgrade    = true
        max_unavailable = 1
      }
    
      instance_types        = var.worker_instance_types
      instance_charge_type  = "PostPaid"
      runtime_name          = "containerd"
      runtime_version       = "1.6.20"
      password              = var.password
      install_cloud_monitor = true
      system_disk_category  = "cloud_efficiency"
      system_disk_size      = 100
      image_type            = "AliyunLinux"
    
      data_disks {
        category = "cloud_essd"
        size     = 120
      }
    }
    
    # Auto-scaling node pool. The pool scales out to at most 10 nodes and keeps at least 1 node.
    resource "alicloud_cs_kubernetes_node_pool" "autoscale_node_pool" {
      cluster_id     = alicloud_cs_managed_kubernetes.default.id
      node_pool_name = local.autoscale_nodepool_name
      vswitch_ids    = split(",", join(",", alicloud_vswitch.vswitches.*.id))
    
      scaling_config {
        min_size = 1
        max_size = 10
      }
    
      instance_types        = var.worker_instance_types
      runtime_name          = "containerd"
      runtime_version       = "1.6.20"
      password              = var.password # The password used to log on to the nodes through SSH.
      install_cloud_monitor = true         # Whether to install CloudMonitor on the nodes.
      system_disk_category  = "cloud_efficiency"
      system_disk_size      = 100
      image_type            = "AliyunLinux3"
    
      data_disks {              # Data disk configuration of the nodes.
        category = "cloud_essd" # Data disk category.
        size     = 120          # Data disk size.
      }
    }
  2. Run the following command to initialize the Terraform runtime environment.

    terraform init

    If the following information is returned, Terraform has been initialized.

    Terraform has been successfully initialized!
    
    You may now begin working with Terraform. Try running "terraform plan" to see
    any changes that are required for your infrastructure. All Terraform commands
    should now work.
    
    If you ever set or change modules or backend configuration for Terraform,
    rerun this command to reinitialize your working directory. If you forget, other
    commands will detect it and remind you to do so if necessary.
  3. Create an execution plan and preview the changes.

    terraform plan
  4. Run the following command to create the cluster.

    terraform apply

    When prompted, enter yes and press Enter, then wait for the command to complete. If the following information is returned, the ACK cluster has been created.

    Do you want to perform these actions?
      Terraform will perform the actions described above.
      Only 'yes' will be accepted to approve.
    
      Enter a value: yes
    
    ...
    alicloud_cs_managed_kubernetes.default: Creation complete after 5m48s [id=ccb53e72ec6c447c990762800********]
    ...
    
    Apply complete! Resources: 11 added, 0 changed, 0 destroyed.
  5. Verify the result

    Run the terraform show command

    You can run the following command to query detailed information about the resources that Terraform has created.

    terraform show

    Log on to the ACK console

    Log on to the Container Service for Kubernetes console to view the cluster that you created.
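
The variables in main.tf provide default values for settings such as the region, instance types, and cluster name prefix. To deploy with different values, you can override them at apply time instead of editing main.tf. A minimal example with placeholder values:

terraform apply -var="region_id=cn-hangzhou" -var="k8s_name_prefix=my-ack"

You can also place such overrides in a terraform.tfvars file in the working directory.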

Clean up resources

When you no longer need the resources created or managed by Terraform in this example, run the terraform destroy command to release them. For more information about terraform destroy, see Common Terraform commands.

terraform destroy
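
terraform destroy asks for interactive confirmation before deleting anything. In automation scenarios, you can add the -auto-approve flag to skip the prompt. Use it with caution, because it deletes all resources in the configuration without asking:

terraform destroy -auto-approve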

Complete example

Note

The sample code in this example supports one-click execution. You can run the code directly.

provider "alicloud" {
  region = var.region_id
}

variable "region_id" {
  type    = string
  default = "cn-shenzhen"
}

variable "cluster_spec" {
  type        = string
  description = "The cluster specifications of kubernetes cluster,which can be empty. Valid values:ack.standard : Standard managed clusters; ack.pro.small : Professional managed clusters."
  default     = "ack.pro.small"
}

variable "ack_version" {
  type        = string
  description = "Desired Kubernetes version. "
  default     = "1.28.9-aliyun.1"
}

# The availability zones of the vSwitches.
variable "availability_zone" {
  description = "The availability zones of vswitches."
  default     = ["cn-shenzhen-c", "cn-shenzhen-e", "cn-shenzhen-f"]
}

# The list of existing vSwitch IDs for worker nodes.
variable "node_vswitch_ids" {
  description = "List of existing node vswitch ids for terway."
  type        = list(string)
  default     = []
}

# CIDR blocks used to create new vSwitches when node_vswitch_ids is not specified.
variable "node_vswitch_cidrs" {
  description = "List of cidr blocks used to create several new vswitches when 'node_vswitch_ids' is not specified."
  type        = list(string)
  default     = ["172.16.0.0/23", "172.16.2.0/23", "172.16.4.0/23"]
}

# Existing pod vSwitch IDs for Terway. If empty, new vSwitches for Terway are created from terway_vswitch_cidrs.
variable "terway_vswitch_ids" {
  description = "List of existing pod vswitch ids for terway."
  type        = list(string)
  default     = []
}

# CIDR blocks used to create the vSwitches for Terway when terway_vswitch_ids is not specified.
variable "terway_vswitch_cidrs" {
  description = "List of cidr blocks used to create several new vswitches when 'terway_vswitch_ids' is not specified."
  type        = list(string)
  default     = ["172.16.208.0/20", "172.16.224.0/20", "172.16.240.0/20"]
}

# ECS instance types used to launch worker nodes.
variable "worker_instance_types" {
  description = "The ecs instance types used to launch worker nodes."
  default     = ["ecs.g6.2xlarge", "ecs.g6.xlarge"]
}

# The password used to log on to the worker nodes.
variable "password" {
  description = "The password of ECS instance."
  default     = "Test123456"
}

# Components installed in the ACK cluster: Terway (network), csi-plugin (storage), csi-provisioner (storage), logtail-ds (logging), Nginx Ingress Controller, ack-arms-prometheus (monitoring), and ack-node-problem-detector (node diagnostics).
variable "cluster_addons" {
  type = list(object({
    name   = string
    config = string
  }))

  default = [
    {
      "name"   = "terway-eniip",
      "config" = "",
    },
    {
      "name"   = "logtail-ds",
      "config" = "{\"IngressDashboardEnabled\":\"true\"}",
    },
    {
      "name"   = "nginx-ingress-controller",
      "config" = "{\"IngressSlbNetworkType\":\"internet\"}",
    },
    {
      "name"   = "arms-prometheus",
      "config" = "",
    },
    {
      "name"   = "ack-node-problem-detector",
      "config" = "{\"sls_project_name\":\"\"}",
    },
    {
      "name"   = "csi-plugin",
      "config" = "",
    },
    {
      "name"   = "csi-provisioner",
      "config" = "",
    }
  ]
}

# Prefix used in the name of the ACK managed cluster.
variable "k8s_name_prefix" {
  description = "The name prefix used to create managed kubernetes cluster."
  default     = "tf-ack-shenzhen"
}

# Default resource names.
locals {
  k8s_name_terway         = substr(join("-", [var.k8s_name_prefix, "terway"]), 0, 63)
  k8s_name_flannel        = substr(join("-", [var.k8s_name_prefix, "flannel"]), 0, 63)
  k8s_name_ask            = substr(join("-", [var.k8s_name_prefix, "ask"]), 0, 63)
  new_vpc_name            = "tf-vpc-172-16"
  new_vsw_name_azD        = "tf-vswitch-azD-172-16-0"
  new_vsw_name_azE        = "tf-vswitch-azE-172-16-2"
  new_vsw_name_azF        = "tf-vswitch-azF-172-16-4"
  nodepool_name           = "default-nodepool"
  managed_nodepool_name   = "managed-node-pool"
  autoscale_nodepool_name = "autoscale-node-pool"
  log_project_name        = "log-for-${local.k8s_name_terway}"
}

# Node ECS instance configuration. Queries ECS instance types that meet the CPU and memory requirements.
data "alicloud_instance_types" "default" {
  cpu_core_count       = 8
  memory_size          = 32
  availability_zone    = var.availability_zone[0]
  kubernetes_node_role = "Worker"
}

# VPC.
resource "alicloud_vpc" "default" {
  vpc_name   = local.new_vpc_name
  cidr_block = "172.16.0.0/12"
}

# vSwitches for worker nodes.
resource "alicloud_vswitch" "vswitches" {
  count      = length(var.node_vswitch_ids) > 0 ? 0 : length(var.node_vswitch_cidrs)
  vpc_id     = alicloud_vpc.default.id
  cidr_block = element(var.node_vswitch_cidrs, count.index)
  zone_id    = element(var.availability_zone, count.index)
}

# vSwitches for pods.
resource "alicloud_vswitch" "terway_vswitches" {
  count      = length(var.terway_vswitch_ids) > 0 ? 0 : length(var.terway_vswitch_cidrs)
  vpc_id     = alicloud_vpc.default.id
  cidr_block = element(var.terway_vswitch_cidrs, count.index)
  zone_id    = element(var.availability_zone, count.index)
}

# ACK managed cluster.
resource "alicloud_cs_managed_kubernetes" "default" {
  name                         = local.k8s_name_terway # The name of the cluster.
  cluster_spec                 = var.cluster_spec      # Create an ACK Pro cluster.
  version                      = var.ack_version
  worker_vswitch_ids           = split(",", join(",", alicloud_vswitch.vswitches.*.id))        # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
  pod_vswitch_ids              = split(",", join(",", alicloud_vswitch.terway_vswitches.*.id)) # vSwitches for pods.
  new_nat_gateway              = true                                                          # Whether to create a new NAT gateway when the cluster is created. Default: true.
  service_cidr                 = "10.11.0.0/16"                                                # The Service CIDR block. It cannot be the same as the VPC CIDR block or the CIDR blocks of existing clusters in the VPC, and it cannot be modified after the cluster is created.
  slb_internet_enabled         = true                                                          # Whether to create an Internet-facing SLB instance for the API server. Default: false.
  enable_rrsa                  = true
  control_plane_log_components = ["apiserver", "kcm", "scheduler", "ccm"] # Control plane log components.

  dynamic "addons" { # 组件管理。
    for_each = var.cluster_addons
    content {
      name   = addons.value.name
      config = addons.value.config
    }
  }
}

# Regular node pool.
resource "alicloud_cs_kubernetes_node_pool" "default" {
  cluster_id            = alicloud_cs_managed_kubernetes.default.id              # The ID of the cluster.
  node_pool_name        = local.nodepool_name                                    # The name of the node pool.
  vswitch_ids           = split(",", join(",", alicloud_vswitch.vswitches.*.id)) # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
  instance_types        = var.worker_instance_types
  instance_charge_type  = "PostPaid"
  runtime_name          = "containerd"
  runtime_version       = "1.6.20"
  desired_size          = 2            # The expected number of nodes in the node pool.
  password              = var.password # The password used to log on to the nodes through SSH.
  install_cloud_monitor = true         # Whether to install CloudMonitor on the nodes.
  system_disk_category  = "cloud_efficiency"
  system_disk_size      = 100
  image_type            = "AliyunLinux"

  data_disks {              # Data disk configuration of the nodes.
    category = "cloud_essd" # Data disk category.
    size     = 120          # Data disk size.
  }
}

# Managed node pool.
resource "alicloud_cs_kubernetes_node_pool" "managed_node_pool" {
  cluster_id     = alicloud_cs_managed_kubernetes.default.id              # The ID of the cluster.
  node_pool_name = local.managed_nodepool_name                            # The name of the node pool.
  vswitch_ids    = split(",", join(",", alicloud_vswitch.vswitches.*.id)) # vSwitches of the node pool. Specify one or more vSwitch IDs located in the zones defined by availability_zone.
  desired_size   = 0                                                      # The expected number of nodes in the node pool.

  management {
    auto_repair     = true
    auto_upgrade    = true
    max_unavailable = 1
  }

  instance_types        = var.worker_instance_types
  instance_charge_type  = "PostPaid"
  runtime_name          = "containerd"
  runtime_version       = "1.6.20"
  password              = var.password
  install_cloud_monitor = true
  system_disk_category  = "cloud_efficiency"
  system_disk_size      = 100
  image_type            = "AliyunLinux"

  data_disks {
    category = "cloud_essd"
    size     = 120
  }
}

# Auto-scaling node pool. The pool scales out to at most 10 nodes and keeps at least 1 node.
resource "alicloud_cs_kubernetes_node_pool" "autoscale_node_pool" {
  cluster_id     = alicloud_cs_managed_kubernetes.default.id
  node_pool_name = local.autoscale_nodepool_name
  vswitch_ids    = split(",", join(",", alicloud_vswitch.vswitches.*.id))

  scaling_config {
    min_size = 1
    max_size = 10
  }

  instance_types        = var.worker_instance_types
  runtime_name          = "containerd"
  runtime_version       = "1.6.20"
  password              = var.password # The password used to log on to the nodes through SSH.
  install_cloud_monitor = true         # Whether to install CloudMonitor on the nodes.
  system_disk_category  = "cloud_efficiency"
  system_disk_size      = 100
  image_type            = "AliyunLinux3"

  data_disks {              # Data disk configuration of the nodes.
    category = "cloud_essd" # Data disk category.
    size     = 120          # Data disk size.
  }
}
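
The example above does not define any output values. If you want terraform apply to print details such as the cluster ID and the VPC ID, you can optionally append output blocks, for example:

output "cluster_id" {
  description = "The ID of the ACK managed cluster."
  value       = alicloud_cs_managed_kubernetes.default.id
}

output "vpc_id" {
  description = "The ID of the VPC created for the cluster."
  value       = alicloud_vpc.default.id
}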