diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd68c8b5e..8d1f812bb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,21 @@
+## 1.9.3 (September 7, 2023)
+[Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/feat/1.9.2...feat/1.9.3)
+
+**Merged pull requests:**
+- Setting machine type in updating virtual machines. [\#630](https://github.com/nutanix/terraform-provider-nutanix/pull/630)
+- Added examples of role creation using nutanix terraform provider. [\#632](https://github.com/nutanix/terraform-provider-nutanix/pull/632)
+
+**Fixed bugs:**
+- Updating gives error: Machine type must be set to Q35 for secure boot. [\#622](https://github.com/nutanix/terraform-provider-nutanix/issues/622)
+- Machine type must be set to Q35 for secure boot. [\#494](https://github.com/nutanix/terraform-provider-nutanix/issues/494)
+
+**Closed issues:**
+- Add support documentation in terraform. [\#611](https://github.com/nutanix/terraform-provider-nutanix/issues/611)
+
+**Closed pull request:**
+- Fix Secure boot VMs when doing updates. [\#496](https://github.com/nutanix/terraform-provider-nutanix/pull/496)
+
+
 ## 1.9.2 (July 21, 2023)
 [Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/feat/1.9.1...feat/1.9.2)
 
diff --git a/README.md b/README.md
index 876d33da4..b08a5c1c4 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 Terraform provider plugin to integrate with Nutanix Enterprise Cloud
 
-NOTE: The latest version of the Nutanix provider is [v1.9.2](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.9.2)
+NOTE: The latest version of the Nutanix provider is [v1.9.3](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.9.3)
 
 Modules based on Terraform Nutanix Provider can be found here : [Modules](https://github.com/nutanix/terraform-provider-nutanix/tree/master/modules)
 ## Build, Quality Status
diff --git a/examples/role/main.tf b/examples/role/main.tf
new file mode 100644
index 000000000..70ac6be5e
--- /dev/null
+++ b/examples/role/main.tf
@@ -0,0 +1,77 @@
+provider "nutanix" {
+  username     = var.user
+  password     = var.password
+  endpoint     = var.endpoint
+  insecure     = var.insecure
+  port         = var.port
+  wait_timeout = 60
+}
+
+# Create Kubernetes Infrastructure Provision role
+# ---------------
+data "nutanix_permission" "k8s_infra_provision_permissions" {
+  for_each        = toset(var.k8s_infra_provision_permissions)
+  permission_name = each.key
+}
+
+resource "nutanix_role" "kubernetes_infrastructure_provision" {
+  name        = "Kubernetes Infrastructure Provision"
+  description = "Access for Kubernetes cluster infrastructure VMs resources"
+  dynamic "permission_reference_list" {
+    for_each = data.nutanix_permission.k8s_infra_provision_permissions
+    content {
+      kind = "permission"
+      uuid = permission_reference_list.value.id
+    }
+  }
+}
+
+data "nutanix_role" "kubernetes_infrastructure_provision" {
+  role_id = nutanix_role.kubernetes_infrastructure_provision.id
+}
+
+# Create CSI System role
+# ---------------
+data "nutanix_permission" "csi_system_role_permissions" {
+  for_each        = toset(var.csi_system_role_permissions)
+  permission_name = each.key
+}
+
+resource "nutanix_role" "csi_system" {
+  name        = "CSI System"
+  description = "Full access for Kubernetes cluster infrastructure resources for CSI"
+  dynamic "permission_reference_list" {
+    for_each = data.nutanix_permission.csi_system_role_permissions
+    content {
+      kind = "permission"
+      uuid = permission_reference_list.value.id
+    }
+  }
+}
+
+data "nutanix_role" "csi_system" {
+  role_id = nutanix_role.csi_system.id
+}
+
+# Create Kubernetes Data Services System role
+# ---------------
+data "nutanix_permission" "k8s_data_services_system_role_permissions" {
+  for_each        = toset(var.k8s_data_services_system_role_permissions)
+  permission_name = each.key
+}
+
+resource "nutanix_role" "k8s_data_services_system" {
+  name        = "Kubernetes Data Services System"
+  description = "Full access for Kubernetes cluster infrastructure resources for Kubernetes Data Services"
+  dynamic "permission_reference_list" {
+    for_each = data.nutanix_permission.k8s_data_services_system_role_permissions
+    content {
+      kind = "permission"
+      uuid = permission_reference_list.value.id
+    }
+  }
+}
+
+data "nutanix_role" "k8s_data_services_system" {
+  role_id = nutanix_role.k8s_data_services_system.id
+}
diff --git a/examples/role/outputs.tf b/examples/role/outputs.tf
new file mode 100644
index 000000000..dcd9b0a50
--- /dev/null
+++ b/examples/role/outputs.tf
@@ -0,0 +1,11 @@
+output "k8s_infra_provision_role_id" {
+  value = data.nutanix_role.kubernetes_infrastructure_provision.id
+}
+
+output "k8s_data_services_system_role_id" {
+  value = data.nutanix_role.k8s_data_services_system.id
+}
+
+output "csi_system_role_id" {
+  value = data.nutanix_role.csi_system.id
+}
\ No newline at end of file
diff --git a/examples/role/variables.tf b/examples/role/variables.tf
new file mode 100644
index 000000000..c0ac2f64a
--- /dev/null
+++ b/examples/role/variables.tf
@@ -0,0 +1,184 @@
+variable "user" {
+  type = string
+}
+variable "password" {
+  type = string
+}
+variable "endpoint" {
+  type = string
+}
+variable "insecure" {
+  type = bool
+}
+variable "port" {
+  type = number
+}
+
+variable "k8s_infra_provision_permissions" {
+  type = list(string)
+  default = [
+    "Create_Category_Mapping",
+    "Create_Image",
+    "Create_Or_Update_Name_Category",
+    "Create_Or_Update_Value_Category",
+    "Create_Virtual_Machine",
+    "Delete_Category_Mapping",
+    "Delete_Image",
+    "Delete_Name_Category",
+    "Delete_Value_Category",
+    "Delete_Virtual_Machine",
+    "Update_Category_Mapping",
+    "Update_Virtual_Machine_Project",
+    "Update_Virtual_Machine",
+    "View_Category_Mapping",
+    "View_Cluster",
+    "View_Image",
+    "View_Name_Category",
+    "View_Project",
+    "View_Subnet",
+    "View_Value_Category",
+    "View_Virtual_Machine"
+  ]
+}
+
+variable "csi_system_role_permissions" {
+  type = list(string)
+  default = [
+    "Create_Volume_Group_Disk",
+    "Delete_Volume_Group_Disk",
+    "Update_Volume_Group_Disk_Internal",
+    "View_Project",
+    "View_Task",
+    "Create_Or_Update_Value_Category",
+    "Create_Category",
+    "View_Name_Category",
+    "View_Category",
+    "View_External_iSCSI_Client",
+    "View_VM_Recovery_Point",
+    "View_Virtual_Machine",
+    "View_Volume_Group_Details",
+    "View_Volume_Group_Disks",
+    "View_Volume_Group_iSCSI_Attachments",
+    "View_Volume_Group_VM_Attachments",
+    "View_Volume_Group_Category_Associations",
+    "View_Volume_Group_Metadata",
+    "Create_Virtual_Machine",
+    "Restore_VM_Recovery_Point",
+    "Delete_Image",
+    "Associate_Volume_Group_Categories",
+    "Disassociate_Volume_Group_Categories",
+    "Update_Virtual_Machine_Project",
+    "Update_Container_Disks",
+    "View_Image",
+    "Create_Category_Mapping",
+    "Create_Volume_Group",
+    "Delete_Category_Mapping",
+    "Update_Category_Mapping",
+    "View_Category_Mapping",
+    "View_Subnet",
+    "Delete_Availability_Zone",
+    "Create_Or_Update_Name_Category",
+    "Delete_Volume_Group",
+    "View_Cluster",
+    "View_Value_Category",
+    "Delete_Category",
+    "Create_Image",
+    "Delete_Virtual_Machine",
+    "View_Container",
+    "View_Storage_Container",
+    "View_Any_Virtual_Machine",
+    "Create_Job",
+    "Update_Virtual_Machine",
+    "Update_Network_Function_Chain",
+    "Delete_Name_Category",
+    "Create_Vm_Snapshot",
+    "Update_Account",
+    "Delete_Value_Category",
+    "Update_Category",
+    "Update_Remote_Connection",
+    "Attach_Volume_Group_To_External_iSCSI_Client",
+    "Detach_Volume_Group_From_External_iSCSI_Client",
+    "Create_Consistency_Group",
+    "Update_Consistency_Group",
+    "View_Consistency_Group",
+    "Create_Recovery_Point",
+    "View_Recovery_Point",
+    "Delete_Recovery_Point",
+    "Set_Expiration_Time_Recovery_Point",
+    "View_Container_Datastore",
+    "View_Container_Stats",
+    "Update_Volume_Group_Details_Internal",
+    "Update_External_iSCSI_Client_Internal"
+  ]
+}
+
+variable "k8s_data_services_system_role_permissions" {
+  type = list(string)
+  default = [
+    "Create_Volume_Group_Disk",
+    "Delete_Volume_Group_Disk",
+    "Update_Volume_Group_Disk_Internal",
+    "View_Project",
+    "View_Task",
+    "Create_Or_Update_Value_Category",
+    "Create_Category",
+    "View_Name_Category",
+    "View_Category",
+    "View_External_iSCSI_Client",
+    "View_VM_Recovery_Point",
+    "View_Virtual_Machine",
+    "View_Volume_Group_Details",
+    "View_Volume_Group_Disks",
+    "View_Volume_Group_iSCSI_Attachments",
+    "View_Volume_Group_VM_Attachments",
+    "View_Volume_Group_Category_Associations",
+    "View_Volume_Group_Metadata",
+    "Create_Virtual_Machine",
+    "Restore_VM_Recovery_Point",
+    "Delete_Image",
+    "Associate_Volume_Group_Categories",
+    "Disassociate_Volume_Group_Categories",
+    "Update_Virtual_Machine_Project",
+    "Update_Container_Disks",
+    "View_Image",
+    "Create_Category_Mapping",
+    "Create_Volume_Group",
+    "Delete_Category_Mapping",
+    "Update_Category_Mapping",
+    "View_Category_Mapping",
+    "View_Subnet",
+    "Delete_Availability_Zone",
+    "Create_Or_Update_Name_Category",
+    "Delete_Volume_Group",
+    "View_Cluster",
+    "View_Value_Category",
+    "Delete_Category",
+    "Create_Image",
+    "Delete_Virtual_Machine",
+    "View_Container",
+    "View_Storage_Container",
+    "View_Any_Virtual_Machine",
+    "Create_Job",
+    "Update_Virtual_Machine",
+    "Update_Network_Function_Chain",
+    "Delete_Name_Category",
+    "Create_Vm_Snapshot",
+    "Update_Account",
+    "Delete_Value_Category",
+    "Update_Category",
+    "Update_Remote_Connection",
+    "Attach_Volume_Group_To_External_iSCSI_Client",
+    "Detach_Volume_Group_From_External_iSCSI_Client",
+    "Create_Consistency_Group",
+    "Update_Consistency_Group",
+    "View_Consistency_Group",
+    "Create_Recovery_Point",
+    "View_Recovery_Point",
+    "Delete_Recovery_Point",
+    "Set_Expiration_Time_Recovery_Point",
+    "View_Container_Datastore",
+    "View_Container_Stats",
+    "Update_Volume_Group_Details_Internal",
+    "Update_External_iSCSI_Client_Internal"
+  ]
+}
diff --git a/examples/role/versions.tf b/examples/role/versions.tf
new file mode 100644
index 000000000..62383997f
--- /dev/null
+++ b/examples/role/versions.tf
@@ -0,0 +1,8 @@
+terraform {
+  required_providers {
+    nutanix = {
+      source  = "nutanix/nutanix"
+      version = "1.9.2"
+    }
+  }
+}
diff --git a/nutanix/resource_nutanix_virtual_machine.go b/nutanix/resource_nutanix_virtual_machine.go
index 29bb93aa7..c6926a5e6 100644
--- a/nutanix/resource_nutanix_virtual_machine.go
+++ b/nutanix/resource_nutanix_virtual_machine.go
@@ -1876,6 +1876,7 @@ func preFillResUpdateRequest(res *v3.VMResources, response *v3.VMIntentResponse)
 	res.VgaConsoleEnabled = response.Spec.Resources.VgaConsoleEnabled
 	res.HardwareClockTimezone = response.Spec.Resources.HardwareClockTimezone
 	res.DiskList = response.Spec.Resources.DiskList
+	res.MachineType = response.Spec.Resources.MachineType
 
 	nold := make([]*v3.VMNic, len(response.Spec.Resources.NicList))
diff --git a/nutanix/resource_nutanix_virtual_machine_test.go b/nutanix/resource_nutanix_virtual_machine_test.go
index 3c365f5f9..4cd32b601 100644
--- a/nutanix/resource_nutanix_virtual_machine_test.go
+++ b/nutanix/resource_nutanix_virtual_machine_test.go
@@ -3,6 +3,7 @@ package nutanix
 import (
 	"fmt"
 	"os"
+	"regexp"
 	"strings"
 	"testing"
 	"time"
@@ -563,6 +564,78 @@ func TestAccNutanixVirtualMachine_SysprepCustomKeyValues(t *testing.T) {
 	})
 }
 
+func TestAccNutanixVirtualMachine_SecureBoot(t *testing.T) {
+	r := acctest.RandInt()
+	resourceName := "nutanix_virtual_machine.test"
+	name := fmt.Sprintf("test-vm-%d", r)
+	desc := "this is vm desc"
+	updatedName := fmt.Sprintf("test-vm-%d-updated", r)
+	updatedDesc := "this is updated desc"
+	memory := "200"
+	updatedMem := "300"
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckNutanixVirtualMachineDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccNutanixVMConfigWithSecureBoot(name, desc, memory),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckNutanixVirtualMachineExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "name", name),
+					resource.TestCheckResourceAttr(resourceName, "description", desc),
+					resource.TestCheckResourceAttr(resourceName, "hardware_clock_timezone", "UTC"),
+					resource.TestCheckResourceAttr(resourceName, "power_state", "ON"),
+					resource.TestCheckResourceAttr(resourceName, "memory_size_mib", memory),
+					resource.TestCheckResourceAttr(resourceName, "num_sockets", "1"),
+					resource.TestCheckResourceAttr(resourceName, "num_vcpus_per_socket", "3"),
+					resource.TestCheckResourceAttr(resourceName, "machine_type", "Q35"),
+					resource.TestCheckResourceAttr(resourceName, "boot_type", "SECURE_BOOT"),
+				),
+			},
+			{
+				Config: testAccNutanixVMConfigWithSecureBoot(updatedName, updatedDesc, updatedMem),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckNutanixVirtualMachineExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "name", updatedName),
+					resource.TestCheckResourceAttr(resourceName, "description", updatedDesc),
+					resource.TestCheckResourceAttr(resourceName, "hardware_clock_timezone", "UTC"),
+					resource.TestCheckResourceAttr(resourceName, "power_state", "ON"),
+					resource.TestCheckResourceAttr(resourceName, "memory_size_mib", updatedMem),
+					resource.TestCheckResourceAttr(resourceName, "num_sockets", "1"),
+					resource.TestCheckResourceAttr(resourceName, "num_vcpus_per_socket", "3"),
+					resource.TestCheckResourceAttr(resourceName, "machine_type", "Q35"),
+					resource.TestCheckResourceAttr(resourceName, "boot_type", "SECURE_BOOT"),
+				),
+			},
+			{
+				ResourceName:            resourceName,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"disk_list"},
+			},
+		},
+	})
+}
+
+func TestAccNutanixVirtualMachine_SecureBootWithNoMachineType(t *testing.T) {
+	r := acctest.RandInt()
+	name := fmt.Sprintf("test-vm-%d", r)
+	desc := "this is vm desc"
+	memory := "200"
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckNutanixVirtualMachineDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config:      testAccNutanixVMConfigWithSecureBootWithNoMachineType(name, desc, memory),
+				ExpectError: regexp.MustCompile("Machine type must be set to Q35 for secure boot."),
+			},
+		},
+	})
+}
+
 func testAccCheckNutanixVirtualMachineExists(n string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
@@ -1447,3 +1520,85 @@ func testAccNutanixVMConfigSysprepCustomKeyValues(r int) string {
 	}
 `, r)
 }
+
+func testAccNutanixVMConfigWithSecureBoot(name, desc, mem string) string {
+	return fmt.Sprintf(`
+	data "nutanix_clusters" "clusters" {}
+
+	locals {
+		cluster1 = "${data.nutanix_clusters.clusters.entities.0.service_list.0 == "PRISM_CENTRAL"
+		? data.nutanix_clusters.clusters.entities.1.metadata.uuid : data.nutanix_clusters.clusters.entities.0.metadata.uuid}"
+	}
+
+	resource "nutanix_virtual_machine" "test" {
+		name                 = "%[1]s"
+		description          = "%[2]s"
+		num_vcpus_per_socket = 3
+		num_sockets          = 1
+		memory_size_mib      = %[3]s
+
+		cluster_uuid = "${local.cluster1}"
+
+		boot_type              = "SECURE_BOOT"
+		boot_device_order_list = ["DISK", "CDROM"]
+		machine_type           = "Q35"
+
+		disk_list {
+			disk_size_mib = 40240
+			device_properties {
+				device_type = "DISK"
+				disk_address = {
+					"adapter_type" = "SCSI"
+					"device_index" = "0"
+				}
+			}
+		}
+		disk_list {
+			disk_size_mib = 40240
+			device_properties {
+				device_type = "DISK"
+				disk_address = {
+					"adapter_type" = "SCSI"
+					"device_index" = "1"
+				}
+			}
+		}
+	}
+
+	`, name, desc, mem)
+}
+
+func testAccNutanixVMConfigWithSecureBootWithNoMachineType(name, desc, mem string) string {
+	return fmt.Sprintf(`
+	data "nutanix_clusters" "clusters" {}
+
+	locals {
+		cluster1 = "${data.nutanix_clusters.clusters.entities.0.service_list.0 == "PRISM_CENTRAL"
+		? data.nutanix_clusters.clusters.entities.1.metadata.uuid : data.nutanix_clusters.clusters.entities.0.metadata.uuid}"
+	}
+
+	resource "nutanix_virtual_machine" "test" {
+		name                 = "%[1]s"
+		description          = "%[2]s"
+		num_vcpus_per_socket = 3
+		num_sockets          = 1
+		memory_size_mib      = %[3]s
+
+		cluster_uuid = "${local.cluster1}"
+
+		boot_type              = "SECURE_BOOT"
+		boot_device_order_list = ["DISK", "CDROM"]
+		disk_list {
+			disk_size_mib = 40240
+			device_properties {
+				device_type = "DISK"
+				disk_address = {
+					"adapter_type" = "SCSI"
+					"device_index" = "0"
+				}
+			}
+		}
+	}
+
+	`, name, desc, mem)
+}
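
Usage sketch (illustrative only, not part of the patch above): with res.MachineType now carried through preFillResUpdateRequest, a secure-boot VM configuration like the one below can be updated in place (for example, resizing memory_size_mib) without hitting the "Machine type must be set to Q35 for secure boot." error. The resource name and the cluster_uuid variable here are placeholders, assumed to be supplied by the caller.

    # Hypothetical configuration, mirroring the acceptance-test config in this change.
    resource "nutanix_virtual_machine" "secure_boot_example" {
      name                 = "secure-boot-example"   # placeholder name
      cluster_uuid         = var.cluster_uuid         # assumed input variable
      num_vcpus_per_socket = 2
      num_sockets          = 1
      memory_size_mib      = 2048

      boot_type              = "SECURE_BOOT"
      boot_device_order_list = ["DISK", "CDROM"]
      machine_type           = "Q35"                  # required whenever boot_type is SECURE_BOOT

      disk_list {
        disk_size_mib = 40960
        device_properties {
          device_type = "DISK"
          disk_address = {
            "adapter_type" = "SCSI"
            "device_index" = "0"
          }
        }
      }
    }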