From 7bf8863ee75a733ed0093450e1ad572e2ea619c4 Mon Sep 17 00:00:00 2001 From: Mario Lenz Date: Thu, 9 Jul 2020 22:29:46 +0200 Subject: [PATCH] Add advanced options to vmware_cluster_vsan --- .../fragments/260-vmware_cluster_vsan.yml | 3 + plugins/modules/vmware_cluster_vsan.py | 123 ++++++++++++++++-- .../targets/vmware_cluster_vsan/aliases | 1 + .../vmware_cluster_vsan/tasks/main.yml | 78 ++++++++++- 4 files changed, 193 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/260-vmware_cluster_vsan.yml diff --git a/changelogs/fragments/260-vmware_cluster_vsan.yml b/changelogs/fragments/260-vmware_cluster_vsan.yml new file mode 100644 index 000000000..f79bdd4c8 --- /dev/null +++ b/changelogs/fragments/260-vmware_cluster_vsan.yml @@ -0,0 +1,3 @@ +minor_changes: + - vmware_cluster_vsan - Implement advanced VSAN options (https://github.com/ansible-collections/vmware/issues/260) + - vmware_cluster_vsan now requires the vSAN Management SDK, which needs to be downloaded from VMware and installed manually. diff --git a/plugins/modules/vmware_cluster_vsan.py b/plugins/modules/vmware_cluster_vsan.py index d498cd96d..ea3e18020 100644 --- a/plugins/modules/vmware_cluster_vsan.py +++ b/plugins/modules/vmware_cluster_vsan.py @@ -20,9 +20,11 @@ author: - Joseph Callen (@jcpowermac) - Abhijeet Kasurde (@Akasurde) +- Mario Lenz (@mariolenz) requirements: - - Tested on ESXi 5.5 and 6.5. + - Tested on ESXi 6.7. - PyVmomi installed. + - vSAN Management SDK, which needs to be downloaded from VMware and installed manually. options: cluster_name: description: @@ -46,6 +48,32 @@ on VSAN-enabled hosts in the cluster. type: bool default: False + advanced_options: + version_added: "1.1.0" + description: + - Advanced VSAN Options. + suboptions: + automatic_rebalance: + description: + - If enabled, vSAN automatically rebalances (moves the data among disks) when a capacity disk fullness hits proactive rebalance threshold. 
+ type: bool + disable_site_read_locality: + description: + - For vSAN stretched clusters, reads to vSAN objects occur on the site the VM resides on. Setting this to True will force reads across all mirrors. + type: bool + large_cluster_support: + description: + - Allow > 32 VSAN hosts per cluster; if this is changed on an existing vSAN cluster, all hosts are required to reboot to apply this change. + type: bool + object_repair_timer: + description: + - Delay time in minutes for VSAN to wait for absent component to come back before starting to repair it. + type: int + thin_swap: + description: + - When enabled, swap objects won't reserve 100% space of their size on vSAN datastore. + type: bool + type: dict extends_documentation_fragment: - community.vmware.vmware.documentation @@ -62,6 +90,18 @@ enable_vsan: yes delegate_to: localhost +- name: Enable vSAN and automatic rebalancing + community.vmware.vmware_cluster_vsan: + hostname: '{{ vcenter_hostname }}' + username: '{{ vcenter_username }}' + password: '{{ vcenter_password }}' + datacenter_name: datacenter + cluster_name: cluster + enable_vsan: yes + advanced_options: + automatic_rebalance: True + delegate_to: localhost + - name: Enable vSAN and claim storage automatically community.vmware.vmware_cluster_vsan: hostname: "{{ vcenter_hostname }}" @@ -78,12 +118,21 @@ RETURN = r"""# """ +import traceback + try: from pyVmomi import vim, vmodl except ImportError: pass -from ansible.module_utils.basic import AnsibleModule +try: + import vsanapiutils + HAS_VSANPYTHONSDK = True +except ImportError: + VSANPYTHONSDK_IMP_ERR = traceback.format_exc() + HAS_VSANPYTHONSDK = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible_collections.community.vmware.plugins.module_utils.vmware import ( PyVmomi, TaskError, @@ -101,6 +150,7 @@ def __init__(self, module): self.enable_vsan = module.params['enable_vsan'] self.datacenter = None self.cluster = None + self.advanced_options = None 
self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) if self.datacenter is None: @@ -110,6 +160,14 @@ def __init__(self, module): if self.cluster is None: self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name) + if module.params['advanced_options'] is not None: + self.advanced_options = module.params['advanced_options'] + client_stub = self.si._GetStub() + ssl_context = client_stub.schemeArgs.get('context') + apiVersion = vsanapiutils.GetLatestVmodlVersion(module.params['hostname']) + vcMos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=apiVersion) + self.vsanClusterConfigSystem = vcMos['vsan-cluster-config-system'] + def check_vsan_config_diff(self): """ Check VSAN configuration diff @@ -121,6 +179,25 @@ def check_vsan_config_diff(self): if vsan_config.enabled != self.enable_vsan or \ vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'): return True + + if self.advanced_options is not None: + vsan_config_info = self.vsanClusterConfigSystem.GetConfigInfoEx(self.cluster).extendedConfig + if self.advanced_options['automatic_rebalance'] is not None and \ + self.advanced_options['automatic_rebalance'] != vsan_config_info.proactiveRebalanceInfo.enabled: + return True + if self.advanced_options['disable_site_read_locality'] is not None and \ + self.advanced_options['disable_site_read_locality'] != vsan_config_info.disableSiteReadLocality: + return True + if self.advanced_options['large_cluster_support'] is not None and \ + self.advanced_options['large_cluster_support'] != vsan_config_info.largeScaleClusterSupport: + return True + if self.advanced_options['object_repair_timer'] is not None and \ + self.advanced_options['object_repair_timer'] != vsan_config_info.objectRepairTimer: + return True + if self.advanced_options['thin_swap'] is not None and \ + self.advanced_options['thin_swap'] != vsan_config_info.enableCustomizedSwapObject: + return True + return False def 
configure_vsan(self): @@ -132,14 +209,32 @@ def configure_vsan(self): if self.check_vsan_config_diff(): if not self.module.check_mode: - cluster_config_spec = vim.cluster.ConfigSpecEx() - cluster_config_spec.vsanConfig = vim.vsan.cluster.ConfigInfo() - cluster_config_spec.vsanConfig.enabled = self.enable_vsan - cluster_config_spec.vsanConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo() - cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage') + vSanSpec = vim.vsan.ReconfigSpec( + modify=True, + ) + vSanSpec.vsanClusterConfig = vim.vsan.cluster.ConfigInfo( + enabled=self.enable_vsan + ) + vSanSpec.vsanClusterConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo( + autoClaimStorage=self.params.get('vsan_auto_claim_storage') + ) + if self.advanced_options is not None: + vSanSpec.extendedConfig = vim.vsan.VsanExtendedConfig() + if self.advanced_options['automatic_rebalance'] is not None: + vSanSpec.extendedConfig.proactiveRebalanceInfo = vim.vsan.ProactiveRebalanceInfo( + enabled=self.advanced_options['automatic_rebalance'] + ) + if self.advanced_options['disable_site_read_locality'] is not None: + vSanSpec.extendedConfig.disableSiteReadLocality = self.advanced_options['disable_site_read_locality'] + if self.advanced_options['large_cluster_support'] is not None: + vSanSpec.extendedConfig.largeScaleClusterSupport = self.advanced_options['large_cluster_support'] + if self.advanced_options['object_repair_timer'] is not None: + vSanSpec.extendedConfig.objectRepairTimer = self.advanced_options['object_repair_timer'] + if self.advanced_options['thin_swap'] is not None: + vSanSpec.extendedConfig.enableCustomizedSwapObject = self.advanced_options['thin_swap'] try: - task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) - changed, result = wait_for_task(task) + task = self.vsanClusterConfigSystem.VsanClusterReconfig(self.cluster, vSanSpec) + changed, result = 
wait_for_task(vim.Task(task._moId, self.si._stub)) except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) except vmodl.MethodFault as method_fault: @@ -163,6 +258,13 @@ def main(): # VSAN enable_vsan=dict(type='bool', default=False), vsan_auto_claim_storage=dict(type='bool', default=False), + advanced_options=dict(type='dict', options=dict( + automatic_rebalance=dict(type='bool', required=False), + disable_site_read_locality=dict(type='bool', required=False), + large_cluster_support=dict(type='bool', required=False), + object_repair_timer=dict(type='int', required=False), + thin_swap=dict(type='bool', required=False), + )), )) module = AnsibleModule( @@ -170,6 +272,9 @@ def main(): supports_check_mode=True, ) + if not HAS_VSANPYTHONSDK: + module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR) + vmware_cluster_vsan = VMwareCluster(module) vmware_cluster_vsan.configure_vsan() diff --git a/tests/integration/targets/vmware_cluster_vsan/aliases b/tests/integration/targets/vmware_cluster_vsan/aliases index 559d0771d..093a0f0eb 100644 --- a/tests/integration/targets/vmware_cluster_vsan/aliases +++ b/tests/integration/targets/vmware_cluster_vsan/aliases @@ -1,3 +1,4 @@ +disabled cloud/vcenter needs/target/prepare_vmware_tests zuul/vmware/vcenter_only diff --git a/tests/integration/targets/vmware_cluster_vsan/tasks/main.yml b/tests/integration/targets/vmware_cluster_vsan/tasks/main.yml index 23657bb14..f4b0e1bb9 100644 --- a/tests/integration/targets/vmware_cluster_vsan/tasks/main.yml +++ b/tests/integration/targets/vmware_cluster_vsan/tasks/main.yml @@ -35,7 +35,62 @@ that: - "{{ cluster_vsan_result_0001.changed == true }}" - # Testcase 0002: Disable vSAN + # Testcase 0002: Enable vSAN again (check for idempotency) + - name: Enable vSAN again (check for idempotency) + vmware_cluster_vsan: + validate_certs: False + hostname: "{{ vcenter_hostname }}" + username: "{{ 
vcenter_username }}" + password: "{{ vcenter_password }}" + datacenter_name: "{{ dc1 }}" + cluster_name: test_cluster_vsan + enable_vsan: yes + register: cluster_vsan_result_0002 + + - name: Ensure vSAN is not enabled again + assert: + that: + - "{{ cluster_vsan_result_0002.changed == false }}" + + # Testcase 0003: Change object repair timer + - name: Change object repair timer + vmware_cluster_vsan: + validate_certs: False + hostname: "{{ vcenter_hostname }}" + username: "{{ vcenter_username }}" + password: "{{ vcenter_password }}" + datacenter_name: "{{ dc1 }}" + cluster_name: test_cluster_vsan + enable_vsan: yes + advanced_options: + object_repair_timer: 67 + register: cluster_vsan_result_0003 + + - name: Ensure object repair timer is changed + assert: + that: + - "{{ cluster_vsan_result_0003.changed == true }}" + + # Testcase 0004: Change object repair timer again (check for idempotency) + - name: Change object repair timer again (check for idempotency) + vmware_cluster_vsan: + validate_certs: False + hostname: "{{ vcenter_hostname }}" + username: "{{ vcenter_username }}" + password: "{{ vcenter_password }}" + datacenter_name: "{{ dc1 }}" + cluster_name: test_cluster_vsan + enable_vsan: yes + advanced_options: + object_repair_timer: 67 + register: cluster_vsan_result_0004 + + - name: Ensure object repair timer is not changed again + assert: + that: + - "{{ cluster_vsan_result_0004.changed == false }}" + + # Testcase 0005: Disable vSAN - name: Disable vSAN vmware_cluster_vsan: validate_certs: False @@ -45,12 +100,29 @@ datacenter_name: "{{ dc1 }}" cluster_name: test_cluster_vsan enable_vsan: no - register: cluster_vsan_result_0002 + register: cluster_vsan_result_0005 - name: Ensure vSAN is disabled assert: that: - - "{{ cluster_vsan_result_0002.changed == true }}" + - "{{ cluster_vsan_result_0005.changed == true }}" + + # Testcase 0006: Disable vSAN again (check for idempotency) + - name: Disable vSAN again (check for idempotency) + vmware_cluster_vsan: + 
validate_certs: False + hostname: "{{ vcenter_hostname }}" + username: "{{ vcenter_username }}" + password: "{{ vcenter_password }}" + datacenter_name: "{{ dc1 }}" + cluster_name: test_cluster_vsan + enable_vsan: no + register: cluster_vsan_result_0006 + + - name: Ensure vSAN is not disabled again + assert: + that: + - "{{ cluster_vsan_result_0006.changed == false }}" # Delete test cluster - name: Delete test cluster