from fb_tools.common import pp, to_str
from fb_tools.errors import HandlerError, ExpectedHandlerError
from fb_tools.handler import BaseHandler
-from fb_tools.xlate import format_list
from fb_vmware.errors import VSphereExpectedError
-from fb_vmware.errors import VSphereDatacenterNotFoundError
-# from fb_tools.vsphere.server import VsphereServer
from fb_vmware.connect import VsphereConnection
-from fb_vmware.iface import VsphereVmInterface
-from fb_vmware.datastore import VsphereDatastore
-
-
# Own modules
from .. import print_section_start, print_section_end
from ..cobbler import Cobbler
-from ..errors import MSG_NO_CLUSTER, TempVmExistsError, NoDatastoreFoundError
-
from ..xlate import XLATOR
-__version__ = '2.4.4'
+__version__ = '2.4.5'
LOG = logging.getLogger(__name__)
TZ = pytz.timezone('Europe/Berlin')
return 0
- # -------------------------------------------------------------------------
- def check_for_temp_tpl_vm(self, no_error=False):
-
- LOG.info(_("First checking, whether {!r} exists ...").format(self.tpl_vm_fqdn))
- print_section_start(
- 'check_existing_template', "Checking for existence of template ...",
- collapsed=True)
- vm = self.vsphere.get_vm(self.tpl_vm_fqdn, no_error=no_error)
-
- if vm:
- if self.verbose > 1:
- LOG.debug(_("Temporary VM {n!r} exists, raising {e}.").format(
- n=self.tpl_vm_fqdn, e='TempVmExistsError'))
- if self.verbose > 2:
- msg = "Info about Temporary VM {!r}:".format(self.tpl_vm_fqdn)
- msg += '\n' + pp(vm.config)
- LOG.debug(msg)
- print_section_end('check_existing_template')
- raise TempVmExistsError(self.tpl_vm_fqdn)
-
- LOG.debug(_("Temporary VM {!r} does not exists, will be created.").format(
- self.tpl_vm_fqdn))
- print_section_end('check_existing_template')
-
- # -------------------------------------------------------------------------
- def get_temp_tpl_vm(self):
-
- print_section_start('get_temp_tpl_vm', "Get created template VM ...", collapsed=True)
- vm = self.vsphere.get_vm(self.tpl_vm_fqdn, as_vmw_obj=True)
- print_section_end('get_temp_tpl_vm')
-
- return vm
-
- # -------------------------------------------------------------------------
- def select_data_store(self):
-
- LOG.info(_(
- "Selecting a SAN based datastore with at least {:0.1f} GiB available "
- "space.").format(self.cfg.data_size_gb))
- print_section_start('select_data_store', "Selecting data store ...", collapsed=True)
-
- self.vsphere.get_ds_clusters()
- self.vsphere.get_datastores()
-
- ds_to_use = None
- if self.cfg.storage_cluster:
- ds_to_use = self.select_data_store_from_cluster()
- if ds_to_use:
- msg = _(
- "Got datastore {n!r} as a member of datastore cluster {c!r}.").format(
- n=ds_to_use.name, c=self.cfg.storage_cluster)
- LOG.info(msg)
- else:
- msg = MSG_NO_CLUSTER.format(
- size=self.cfg.data_size_gb, c_name=self.cfg.storage_cluster)
- LOG.warn(msg)
- if not ds_to_use:
- ds_to_use = self.select_simple_data_store()
-
- if not ds_to_use:
- print_section_end('select_data_store')
- raise NoDatastoreFoundError(self.cfg.data_size_gb)
-
- self.tpl_data_store = ds_to_use
- LOG.info(_("Using datastore {!r} for volume of temporary VM to create.").format(
- ds_to_use.name))
- print_section_end('select_data_store')
- return
-
- # -------------------------------------------------------------------------
- def select_data_store_from_cluster(self):
-
- # Searching for the right storage cluster
- c_name = self.cfg.storage_cluster
- used_c_name = None
- for cluster_name in self.vsphere.ds_clusters.keys():
- if cluster_name.lower() == c_name.lower():
- msg = _("Found storage cluster {!r}.").format(cluster_name)
- used_c_name = cluster_name
- break
- if not used_c_name:
- return None
-
- cluster = self.vsphere.ds_clusters[used_c_name]
- if cluster.free_space_gb <= self.cfg.data_size_gb:
- msg = _(
- "Cannot use datastore cluster {n!r}, free space "
- "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
- n=used_c_name, free=cluster.free_space_gb, min=self.cfg.data_size_gb)
- LOG.warn(msg)
- return None
-
- pod = self._get_storage_pod_obj(used_c_name)
- if not pod:
- msg = _("Could not get {c} object with name {n!r}.").format(
- c="vim.StoragePod", n=used_c_name)
- raise HandlerError(msg)
-
- vmconf = vim.vm.ConfigSpec()
- podsel = vim.storageDrs.PodSelectionSpec()
- podsel.storagePod = pod
-
- folder_obj = self.vsphere.get_vm_folder(self.cfg.folder)
-
- storagespec = vim.storageDrs.StoragePlacementSpec()
- storagespec.podSelectionSpec = podsel
- storagespec.type = 'create'
- storagespec.folder = folder_obj
- storagespec.resourcePool = self.cluster.resource_pool
- storagespec.configSpec = vmconf
-
- LOG.debug(_(
- "Trying to get a recommendation for a datastore from "
- "VSphere storageResourceManager ..."))
- if self.verbose > 2:
- msg = "storagespec:\n" + pp(storagespec)
- LOG.debug(msg)
- content = self.vsphere.service_instance.RetrieveContent()
- try:
- rec = content.storageResourceManager.RecommendDatastores(storageSpec=storagespec)
- rec_action = rec.recommendations[0].action[0]
- real_datastore_name = rec_action.destination.name
- except Exception as e:
- msg = _(
- "Got no recommendation for a datastore from VSphere storageResourceManager: "
- "{c} - {e}").format(c=e.__class__.__name__, e=e)
- LOG.warn(msg)
- return None
-
- datastore = self.vsphere.get_obj(content, [vim.Datastore], real_datastore_name)
- ds = VsphereDatastore.from_summary(
- datastore, appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
- return ds
-
- # -------------------------------------------------------------------------
- def _get_storage_pod_obj(self, used_c_name):
-
- content = self.vsphere.service_instance.RetrieveContent()
- dc = self.vsphere.get_obj(content, [vim.Datacenter], self.cfg.vsphere_info.dc)
- if not dc:
- raise VSphereDatacenterNotFoundError(self.cfg.vsphere_info.dc)
-
- for child in dc.datastoreFolder.childEntity:
- pod = self._get_storage_pod_obj_rec(child, used_c_name)
- if pod:
- return pod
-
- return pod
-
- # -------------------------------------------------------------------------
- def _get_storage_pod_obj_rec(self, child, used_c_name, depth=1):
-
- if hasattr(child, 'childEntity'):
- if depth > self.vsphere.max_search_depth:
- return None
- for sub_child in child.childEntity:
- pod = self._get_storage_pod_obj_rec(sub_child, used_c_name, depth + 1)
- if pod:
- return pod
-
- if isinstance(child, vim.StoragePod):
- if child.summary.name == used_c_name:
- return child
-
- return None
-
- # -------------------------------------------------------------------------
- def select_simple_data_store(self):
-
- usable_ds = []
- for ds in self.vsphere.datastores.values():
- if not ds.accessible:
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r} - not accessible.").format(n=ds.name))
- continue
- if ds.name not in self.cluster.datastores:
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r}, not in cluster {c!r}.").format(
- n=ds.name, c=self.cluster.name))
- continue
- if self.verbose > 3:
- LOG.debug(_("Checking datastore:") + '\n' + pp(ds.as_dict()))
- if ds.storage_type not in ('SAS', 'SSD', 'SATA'):
- if self.verbose > 1:
- LOG.debug(_("Cannot use datastore {n!r}, is of type {t!r}.").format(
- n=ds.name, t=ds.storage_type))
- continue
- if ds.free_space_gb <= self.cfg.data_size_gb:
- if self.verbose > 1:
- LOG.debug(_(
- "Cannot use datastore {n!r}, free space "
- "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
- n=ds.name, free=ds.free_space_gb, min=self.cfg.data_size_gb))
- continue
-
- usable_ds.append(ds)
-
- LOG.debug(_("Found {} usable datastores.").format(len(usable_ds)))
- if len(usable_ds) < 1:
- msg = _("Did not found an usable datastore.")
- raise ExpectedHandlerError(msg)
-
- for st_type in ('SATA', 'SAS', 'SSD'):
-
- ds_list = []
- for ds in usable_ds:
- if ds.storage_type == st_type:
- ds_list.append(ds)
- if not len(ds_list):
- continue
-
- return random.choice(ds_list)
-
- return None
-
- # -------------------------------------------------------------------------
- def create_vm(self):
-
- disk_size = self.cfg.data_size_gb
-
- iface = VsphereVmInterface(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- name='eth', network=self.tpl_network, network_name=self.cfg.network,
- summary='Primary network device')
-
- if self.verbose > 1:
- msg = _("Defined interface to create:") + "\n{}".format(pp(iface.as_dict()))
- LOG.debug(msg)
-
- vm_spec = self.vsphere.generate_vm_create_spec(
- name=self.tpl_vm_fqdn, datastore=self.tpl_data_store.name,
- disks=[disk_size], nw_interfaces=[iface], graphic_ram_mb=256,
- videao_ram_mb=32, boot_delay_secs=self.vm_boot_delay_secs, ram_mb=self.cfg.ram_mb,
- num_cpus=self.cfg.num_cpus, ds_with_timestamp=True,
- os_version=self.cfg.os_version, cfg_version=self.cfg.vmware_cfg_version)
-
- tpl_vm_folder = self.vsphere.get_vm_folder(self.cfg.folder)
- if self.verbose > 1:
- msg = _("VM-Folder object for template VM: {c} - {n!r}").format(
- c=tpl_vm_folder, n=tpl_vm_folder.name)
- msg += '\n' + pp(tpl_vm_folder.childType)
- LOG.debug(msg)
-
- self.vsphere.create_vm(
- name=self.tpl_vm_fqdn, vm_folder=tpl_vm_folder, vm_config_spec=vm_spec,
- pool=self.cluster.resource_pool, max_wait=self.cfg.max_wait_for_create_vm)
-
- # -------------------------------------------------------------------------
- def eval_tpl_ips(self):
-
- LOG.info(_("Trying to evaluate the IP address of the template VM ..."))
-
- initial_delay = (2 * self.vm_boot_delay_secs) + 120
-
- LOG.debug(_("Waiting initially for {} seconds:").format(initial_delay))
- print(' ==> ', end='', flush=True)
-
- start_time = time.time()
- cur_time = start_time
- cur_duration = 0
-
- while cur_duration <= initial_delay:
- time.sleep(1)
- cur_time = time.time()
- print('.', end='', flush=True)
- cur_duration = cur_time - start_time
- print('', flush=True)
-
- self.tpl_ips = self.cobbler.get_dhcp_ips(self.tpl_macaddress)
- if not self.tpl_ips:
- msg = _(
- "Did not got the IP address of MAC address {mac!r} after "
- "{delay} seconds.").format(mac=self.tpl_macaddress, delay=initial_delay)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Got IP addresses for template VM:") + ' ' + format_list(self.tpl_ips))
-
# -------------------------------------------------------------------------
def wait_for_finish_install(self):
"VM {h!r} was not shut down after {t:0.1f} seconds, current state is {s!r}.").format(
h=self.tpl_ip, t=cur_diff, s=power_state))
- # -------------------------------------------------------------------------
- def change_mac_address(self):
-
- LOG.info(_("Setting a new, randomized MAC address for template VM ..."))
-
- last_tuple1 = random.randint(1, 254)
- last_tuple2 = random.randint(1, 254)
- new_mac = self.cfg.mac_address_template.format(last_tuple1, last_tuple2)
- LOG.debug(_("New MAC address: {!r}.").format(new_mac))
-
- vm = self.get_temp_tpl_vm()
- self.vsphere.set_mac_of_nic(vm, new_mac, nic_nr=0)
-
- # -------------------------------------------------------------------------
- def rotate_templates(self):
-
- LOG.info(_("Searching for existing templates and rotate them ..."))
- print_section_start('rotate_templates', "Rotating templates ...", collapsed=True)
- re_is_numeric = re.compile(r'^\s*(\d+)\s*$')
-
- pattern_tpl = r'^' + re.escape(self.cfg.template_name)
- re_tpl = re.compile(pattern_tpl, re.IGNORECASE)
-
- templates = self.vsphere.get_vms(re_tpl, is_template=True, as_vmw_obj=True)
- if not templates:
- LOG.info(_("Did not found any existing templates."))
- return
- msg = ngettext(
- "Found one existing template.", "Found {} existing templates.",
- len(templates)).format(len(templates))
- LOG.debug(msg)
-
- templates_ts = {}
- templates_sorted = []
- new_template_names = {}
-
- for template in templates:
- tpl_name = template.summary.config.name
- val_map = {}
- for extra_cfg in template.config.extraConfig:
- key = extra_cfg.key
- value = extra_cfg.value
- val_map[key] = value
- created = time.time()
- if 'created' in val_map:
- if val_map['created'] and re_is_numeric.match(val_map['created']):
- created = float(val_map['created'])
- ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
- LOG.debug(_("Found template {n!r}, created: {ts}.").format(
- n=tpl_name, ts=ts_created.isoformat(' ')))
- if self.verbose > 2:
- LOG.debug("Template Summary Config:\n{}".format(template.summary.config))
- LOG.debug("Template Extra Config:\n{}".format(pp(val_map)))
-
- templates_ts[tpl_name] = created
-
- for tpl_name in sorted(templates_ts.keys(), key=lambda tpl: templates_ts[tpl]):
- templates_sorted.append(tpl_name)
-
- LOG.debug(_("Templates sorted by creation date:") + '\n' + pp(templates_sorted))
- templates_sorted.reverse()
- templates_to_remove = []
- i = 0
- for tpl_name in templates_sorted:
- if i > self.cfg.max_nr_templates_stay - 2:
- templates_to_remove.append(tpl_name)
- i += 1
- templates_to_remove.reverse()
- if templates_to_remove:
- LOG.debug(_("Templates to remove:") + '\n' + pp(templates_to_remove))
- else:
- LOG.debug(_("There are no templates to remove."))
-
- for template in templates:
- tpl_name = template.summary.config.name
- if tpl_name in templates_to_remove:
- LOG.info(_("Removing template {!r} ...").format(tpl_name))
- self.vsphere.purge_vm(template)
- LOG.debug(_("Successful removed template {!r}.").format(tpl_name))
- continue
- if tpl_name.strip().lower() == self.cfg.template_name.strip().lower():
- created = templates_ts[tpl_name]
- ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
- i = 0
- dt = ts_created.strftime('%Y-%m-%d_%H-%M-%S')
- new_name = "{t}.{d}".format(t=tpl_name, d=dt)
- tname = new_name.strip().lower()
- while tname in new_template_names:
- new_name = "{t}.{d}-{i}".format(t=tpl_name, d=dt, i=i)
- tname = new_name.strip().lower()
- i += 1
- new_template_names[tname] = 1
- LOG.info(_("Renaming template {o!r} => {n!r} ...").format(o=tpl_name, n=new_name))
- task = template.Rename_Task(new_name)
- self.vsphere.wait_for_tasks([task])
- LOG.debug(_("Successful renamed template into {!r}.").format(new_name))
- else:
- tname = tpl_name.strip().lower()
- new_template_names[tname] = 1
-
- print_section_end('rotate_templates')
-
- # -------------------------------------------------------------------------
- def rename_and_change_vm(self):
-
- LOG.info(_("Renaming VM {o!r} => {n!r} ...").format(
- o=self.tpl_vm_fqdn, n=self.cfg.template_name))
- print_section_start(
- 'rename_and_change_vm', "Renaming VM and mark as template ...", collapsed=True)
-
- vm = self.get_temp_tpl_vm()
- task = vm.Rename_Task(self.cfg.template_name)
- self.vsphere.wait_for_tasks([task])
- LOG.debug(_("Successful renamed VM into {!r}.").format(self.cfg.template_name))
-
- LOG.info(_("Changing VM {!r} into a VMWare template ...").format(
- self.cfg.template_name))
- vm.MarkAsTemplate()
- LOG.debug(_("Object {!r} is now a VMWare template.").format(self.cfg.template_name))
- print_section_end('rename_and_change_vm')
-
# -------------------------------------------------------------------------
def create_root_authkeys(self):
# Standard modules
import logging
+import time
# Third party modules
from fb_tools.errors import ExpectedHandlerError
+from fb_tools.xlate import format_list
# Own modules
from ..xlate import XLATOR
-__version__ = '0.2.0'
+__version__ = '0.3.0'
LOG = logging.getLogger(__name__)
self.cfg.current_distro.name)
raise ExpectedHandlerError(msg)
+ # -------------------------------------------------------------------------
+ def eval_tpl_ips(self):
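+        """Evaluate the IP addresses of the template VM from its DHCP leases.
+
+        Waits an initial boot delay, then asks Cobbler for the DHCP addresses
+        of the template MAC address and stores them in self.tpl_ips. Raises
+        an ExpectedHandlerError if no IP address could be found.
+        """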
+
+ LOG.info(_("Trying to evaluate the IP address of the template VM ..."))
+
+ initial_delay = (2 * self.vm_boot_delay_secs) + 120
+
+ LOG.debug(_("Waiting initially for {} seconds:").format(initial_delay))
+ print(' ==> ', end='', flush=True)
+
+ start_time = time.time()
+ cur_time = start_time
+ cur_duration = 0
+
+ while cur_duration <= initial_delay:
+ time.sleep(1)
+ cur_time = time.time()
+ print('.', end='', flush=True)
+ cur_duration = cur_time - start_time
+ print('', flush=True)
+
+ self.tpl_ips = self.cobbler.get_dhcp_ips(self.tpl_macaddress)
+ if not self.tpl_ips:
+ msg = _(
+ "Did not got the IP address of MAC address {mac!r} after "
+ "{delay} seconds.").format(mac=self.tpl_macaddress, delay=initial_delay)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Got IP addresses for template VM:") + ' ' + format_list(self.tpl_ips))
+
# =============================================================================
if __name__ == "__main__":
from __future__ import absolute_import, print_function
# Standard modules
+import datetime
import logging
+import random
+import re
+import time
# Third party modules
+import pytz
-# from fb_tools.common import pp, is_sequence
-# from fb_tools.errors import HandlerError
+from fb_tools.common import pp
+from fb_tools.errors import HandlerError, ExpectedHandlerError
-# Own modules
+from fb_vmware.errors import VSphereDatacenterNotFoundError
+from fb_vmware.iface import VsphereVmInterface
+from fb_vmware.datastore import VsphereDatastore
+
+from pyVmomi import vim
-# from .. import print_section_start, print_section_end
+# Own modules
+from .. import print_section_start, print_section_end
+from ..errors import MSG_NO_CLUSTER, TempVmExistsError, NoDatastoreFoundError
from ..xlate import XLATOR
-__version__ = '0.1.0'
+__version__ = '0.2.0'
LOG = logging.getLogger(__name__)
+TZ = pytz.timezone('Europe/Berlin')
_ = XLATOR.gettext
ngettext = XLATOR.ngettext
A mixin class for extending the CrTplHandler class for VMware/VSphere dependent methods.
"""
- pass
+ # -------------------------------------------------------------------------
+ def check_for_temp_tpl_vm(self, no_error=False):
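+        """Check whether the temporary template VM already exists in VSphere.
+
+        Raises a TempVmExistsError if a VM with the FQDN of the temporary
+        template VM was found.
+        """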
+
+ LOG.info(_("First checking, whether {!r} exists ...").format(self.tpl_vm_fqdn))
+ print_section_start(
+ 'check_existing_template', "Checking for existence of template ...",
+ collapsed=True)
+ vm = self.vsphere.get_vm(self.tpl_vm_fqdn, no_error=no_error)
+
+ if vm:
+ if self.verbose > 1:
+ LOG.debug(_("Temporary VM {n!r} exists, raising {e}.").format(
+ n=self.tpl_vm_fqdn, e='TempVmExistsError'))
+ if self.verbose > 2:
+ msg = "Info about Temporary VM {!r}:".format(self.tpl_vm_fqdn)
+ msg += '\n' + pp(vm.config)
+ LOG.debug(msg)
+ print_section_end('check_existing_template')
+ raise TempVmExistsError(self.tpl_vm_fqdn)
+
+ LOG.debug(_("Temporary VM {!r} does not exists, will be created.").format(
+ self.tpl_vm_fqdn))
+ print_section_end('check_existing_template')
+
+ # -------------------------------------------------------------------------
+ def get_temp_tpl_vm(self):
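+        """Return the temporary template VM as a native pyVmomi VM object."""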
+
+ print_section_start('get_temp_tpl_vm', "Get created template VM ...", collapsed=True)
+ vm = self.vsphere.get_vm(self.tpl_vm_fqdn, as_vmw_obj=True)
+ print_section_end('get_temp_tpl_vm')
+
+ return vm
+
+ # -------------------------------------------------------------------------
+ def select_data_store(self):
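+        """Select the datastore for the volume of the temporary VM.
+
+        Tries the configured datastore cluster first and falls back to a
+        single datastore of the compute cluster. Raises a NoDatastoreFoundError
+        if no datastore with enough free space could be found. The result is
+        stored in self.tpl_data_store.
+        """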
+
+ LOG.info(_(
+ "Selecting a SAN based datastore with at least {:0.1f} GiB available "
+ "space.").format(self.cfg.data_size_gb))
+ print_section_start('select_data_store', "Selecting data store ...", collapsed=True)
+
+ self.vsphere.get_ds_clusters()
+ self.vsphere.get_datastores()
+
+ ds_to_use = None
+ if self.cfg.storage_cluster:
+ ds_to_use = self.select_data_store_from_cluster()
+ if ds_to_use:
+ msg = _(
+ "Got datastore {n!r} as a member of datastore cluster {c!r}.").format(
+ n=ds_to_use.name, c=self.cfg.storage_cluster)
+ LOG.info(msg)
+ else:
+ msg = MSG_NO_CLUSTER.format(
+ size=self.cfg.data_size_gb, c_name=self.cfg.storage_cluster)
+                LOG.warning(msg)
+ if not ds_to_use:
+ ds_to_use = self.select_simple_data_store()
+
+ if not ds_to_use:
+ print_section_end('select_data_store')
+ raise NoDatastoreFoundError(self.cfg.data_size_gb)
+
+ self.tpl_data_store = ds_to_use
+ LOG.info(_("Using datastore {!r} for volume of temporary VM to create.").format(
+ ds_to_use.name))
+ print_section_end('select_data_store')
+ return
+
+ # -------------------------------------------------------------------------
+ def select_data_store_from_cluster(self):
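+        """Select a datastore from the configured datastore cluster.
+
+        Asks the VSphere storageResourceManager for a recommendation and
+        returns the recommended datastore as a VsphereDatastore object, or
+        None if no usable recommendation could be obtained.
+        """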
+
+ # Searching for the right storage cluster
+ c_name = self.cfg.storage_cluster
+ used_c_name = None
+ for cluster_name in self.vsphere.ds_clusters.keys():
+ if cluster_name.lower() == c_name.lower():
+ msg = _("Found storage cluster {!r}.").format(cluster_name)
+ used_c_name = cluster_name
+ break
+ if not used_c_name:
+ return None
+
+ cluster = self.vsphere.ds_clusters[used_c_name]
+ if cluster.free_space_gb <= self.cfg.data_size_gb:
+ msg = _(
+ "Cannot use datastore cluster {n!r}, free space "
+ "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
+ n=used_c_name, free=cluster.free_space_gb, min=self.cfg.data_size_gb)
+            LOG.warning(msg)
+ return None
+
+ pod = self._get_storage_pod_obj(used_c_name)
+ if not pod:
+ msg = _("Could not get {c} object with name {n!r}.").format(
+ c="vim.StoragePod", n=used_c_name)
+ raise HandlerError(msg)
+
+ vmconf = vim.vm.ConfigSpec()
+ podsel = vim.storageDrs.PodSelectionSpec()
+ podsel.storagePod = pod
+
+ folder_obj = self.vsphere.get_vm_folder(self.cfg.folder)
+
+ storagespec = vim.storageDrs.StoragePlacementSpec()
+ storagespec.podSelectionSpec = podsel
+ storagespec.type = 'create'
+ storagespec.folder = folder_obj
+ storagespec.resourcePool = self.cluster.resource_pool
+ storagespec.configSpec = vmconf
+
+ LOG.debug(_(
+ "Trying to get a recommendation for a datastore from "
+ "VSphere storageResourceManager ..."))
+ if self.verbose > 2:
+ msg = "storagespec:\n" + pp(storagespec)
+ LOG.debug(msg)
+ content = self.vsphere.service_instance.RetrieveContent()
+ try:
+ rec = content.storageResourceManager.RecommendDatastores(storageSpec=storagespec)
+ rec_action = rec.recommendations[0].action[0]
+ real_datastore_name = rec_action.destination.name
+ except Exception as e:
+ msg = _(
+ "Got no recommendation for a datastore from VSphere storageResourceManager: "
+ "{c} - {e}").format(c=e.__class__.__name__, e=e)
+            LOG.warning(msg)
+ return None
+
+ datastore = self.vsphere.get_obj(content, [vim.Datastore], real_datastore_name)
+ ds = VsphereDatastore.from_summary(
+ datastore, appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
+ return ds
+
+ # -------------------------------------------------------------------------
+ def _get_storage_pod_obj(self, used_c_name):
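+        """Search the datacenter for the vim.StoragePod object with the given name."""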
+
+ content = self.vsphere.service_instance.RetrieveContent()
+ dc = self.vsphere.get_obj(content, [vim.Datacenter], self.cfg.vsphere_info.dc)
+ if not dc:
+ raise VSphereDatacenterNotFoundError(self.cfg.vsphere_info.dc)
+
+ for child in dc.datastoreFolder.childEntity:
+ pod = self._get_storage_pod_obj_rec(child, used_c_name)
+ if pod:
+ return pod
+
+        return None
+
+ # -------------------------------------------------------------------------
+ def _get_storage_pod_obj_rec(self, child, used_c_name, depth=1):
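+        """Recursively search the given folder entity for the named vim.StoragePod."""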
+
+ if hasattr(child, 'childEntity'):
+ if depth > self.vsphere.max_search_depth:
+ return None
+ for sub_child in child.childEntity:
+ pod = self._get_storage_pod_obj_rec(sub_child, used_c_name, depth + 1)
+ if pod:
+ return pod
+
+ if isinstance(child, vim.StoragePod):
+ if child.summary.name == used_c_name:
+ return child
+
+ return None
+
+ # -------------------------------------------------------------------------
+ def select_simple_data_store(self):
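+        """Select a single datastore of the compute cluster with enough free space.
+
+        Only accessible SAS, SSD or SATA datastores of the cluster are taken
+        into account; one of them is chosen randomly, preferring SATA over
+        SAS over SSD. Raises an ExpectedHandlerError if none is usable.
+        """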
+
+ usable_ds = []
+ for ds in self.vsphere.datastores.values():
+ if not ds.accessible:
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r} - not accessible.").format(n=ds.name))
+ continue
+ if ds.name not in self.cluster.datastores:
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r}, not in cluster {c!r}.").format(
+ n=ds.name, c=self.cluster.name))
+ continue
+ if self.verbose > 3:
+ LOG.debug(_("Checking datastore:") + '\n' + pp(ds.as_dict()))
+ if ds.storage_type not in ('SAS', 'SSD', 'SATA'):
+ if self.verbose > 1:
+ LOG.debug(_("Cannot use datastore {n!r}, is of type {t!r}.").format(
+ n=ds.name, t=ds.storage_type))
+ continue
+ if ds.free_space_gb <= self.cfg.data_size_gb:
+ if self.verbose > 1:
+ LOG.debug(_(
+ "Cannot use datastore {n!r}, free space "
+ "{free:0.1f} GiB is less than {min:0.1f} GiB.").format(
+ n=ds.name, free=ds.free_space_gb, min=self.cfg.data_size_gb))
+ continue
+
+ usable_ds.append(ds)
+
+ LOG.debug(_("Found {} usable datastores.").format(len(usable_ds)))
+ if len(usable_ds) < 1:
+ msg = _("Did not found an usable datastore.")
+ raise ExpectedHandlerError(msg)
+
+ for st_type in ('SATA', 'SAS', 'SSD'):
+
+ ds_list = []
+ for ds in usable_ds:
+ if ds.storage_type == st_type:
+ ds_list.append(ds)
+ if not len(ds_list):
+ continue
+
+ return random.choice(ds_list)
+
+ return None
+
+ # -------------------------------------------------------------------------
+ def create_vm(self):
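+        """Create the temporary template VM in VSphere.
+
+        Builds the network interface definition and the VM creation spec from
+        the configuration and creates the VM below the configured VM folder.
+        """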
+
+ disk_size = self.cfg.data_size_gb
+
+ iface = VsphereVmInterface(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ name='eth', network=self.tpl_network, network_name=self.cfg.network,
+ summary='Primary network device')
+
+ if self.verbose > 1:
+ msg = _("Defined interface to create:") + "\n{}".format(pp(iface.as_dict()))
+ LOG.debug(msg)
+
+ vm_spec = self.vsphere.generate_vm_create_spec(
+ name=self.tpl_vm_fqdn, datastore=self.tpl_data_store.name,
+ disks=[disk_size], nw_interfaces=[iface], graphic_ram_mb=256,
+ videao_ram_mb=32, boot_delay_secs=self.vm_boot_delay_secs, ram_mb=self.cfg.ram_mb,
+ num_cpus=self.cfg.num_cpus, ds_with_timestamp=True,
+ os_version=self.cfg.os_version, cfg_version=self.cfg.vmware_cfg_version)
+
+ tpl_vm_folder = self.vsphere.get_vm_folder(self.cfg.folder)
+ if self.verbose > 1:
+ msg = _("VM-Folder object for template VM: {c} - {n!r}").format(
+ c=tpl_vm_folder, n=tpl_vm_folder.name)
+ msg += '\n' + pp(tpl_vm_folder.childType)
+ LOG.debug(msg)
+
+ self.vsphere.create_vm(
+ name=self.tpl_vm_fqdn, vm_folder=tpl_vm_folder, vm_config_spec=vm_spec,
+ pool=self.cluster.resource_pool, max_wait=self.cfg.max_wait_for_create_vm)
+
+ # -------------------------------------------------------------------------
+ def change_mac_address(self):
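+        """Set a new, randomized MAC address on the first NIC of the template VM."""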
+
+ LOG.info(_("Setting a new, randomized MAC address for template VM ..."))
+
+ last_tuple1 = random.randint(1, 254)
+ last_tuple2 = random.randint(1, 254)
+ new_mac = self.cfg.mac_address_template.format(last_tuple1, last_tuple2)
+ LOG.debug(_("New MAC address: {!r}.").format(new_mac))
+
+ vm = self.get_temp_tpl_vm()
+ self.vsphere.set_mac_of_nic(vm, new_mac, nic_nr=0)
+
+ # -------------------------------------------------------------------------
+ def rotate_templates(self):
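+        """Rotate the existing VSphere templates with the configured template name.
+
+        Templates exceeding the configured number to keep are removed, and the
+        template carrying the configured name is renamed by appending its
+        creation timestamp.
+        """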
+
+ LOG.info(_("Searching for existing templates and rotate them ..."))
+ print_section_start('rotate_templates', "Rotating templates ...", collapsed=True)
+ re_is_numeric = re.compile(r'^\s*(\d+)\s*$')
+
+ pattern_tpl = r'^' + re.escape(self.cfg.template_name)
+ re_tpl = re.compile(pattern_tpl, re.IGNORECASE)
+
+ templates = self.vsphere.get_vms(re_tpl, is_template=True, as_vmw_obj=True)
+ if not templates:
+ LOG.info(_("Did not found any existing templates."))
+ return
+ msg = ngettext(
+ "Found one existing template.", "Found {} existing templates.",
+ len(templates)).format(len(templates))
+ LOG.debug(msg)
+
+ templates_ts = {}
+ templates_sorted = []
+ new_template_names = {}
+
+ for template in templates:
+ tpl_name = template.summary.config.name
+ val_map = {}
+ for extra_cfg in template.config.extraConfig:
+ key = extra_cfg.key
+ value = extra_cfg.value
+ val_map[key] = value
+ created = time.time()
+ if 'created' in val_map:
+ if val_map['created'] and re_is_numeric.match(val_map['created']):
+ created = float(val_map['created'])
+ ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
+ LOG.debug(_("Found template {n!r}, created: {ts}.").format(
+ n=tpl_name, ts=ts_created.isoformat(' ')))
+ if self.verbose > 2:
+ LOG.debug("Template Summary Config:\n{}".format(template.summary.config))
+ LOG.debug("Template Extra Config:\n{}".format(pp(val_map)))
+
+ templates_ts[tpl_name] = created
+
+ for tpl_name in sorted(templates_ts.keys(), key=lambda tpl: templates_ts[tpl]):
+ templates_sorted.append(tpl_name)
+
+ LOG.debug(_("Templates sorted by creation date:") + '\n' + pp(templates_sorted))
+ templates_sorted.reverse()
+ templates_to_remove = []
+ i = 0
+ for tpl_name in templates_sorted:
+ if i > self.cfg.max_nr_templates_stay - 2:
+ templates_to_remove.append(tpl_name)
+ i += 1
+ templates_to_remove.reverse()
+ if templates_to_remove:
+ LOG.debug(_("Templates to remove:") + '\n' + pp(templates_to_remove))
+ else:
+ LOG.debug(_("There are no templates to remove."))
+
+ for template in templates:
+ tpl_name = template.summary.config.name
+ if tpl_name in templates_to_remove:
+ LOG.info(_("Removing template {!r} ...").format(tpl_name))
+ self.vsphere.purge_vm(template)
+ LOG.debug(_("Successful removed template {!r}.").format(tpl_name))
+ continue
+ if tpl_name.strip().lower() == self.cfg.template_name.strip().lower():
+ created = templates_ts[tpl_name]
+ ts_created = datetime.datetime.fromtimestamp(created, tz=TZ)
+ i = 0
+ dt = ts_created.strftime('%Y-%m-%d_%H-%M-%S')
+ new_name = "{t}.{d}".format(t=tpl_name, d=dt)
+ tname = new_name.strip().lower()
+ while tname in new_template_names:
+ new_name = "{t}.{d}-{i}".format(t=tpl_name, d=dt, i=i)
+ tname = new_name.strip().lower()
+ i += 1
+ new_template_names[tname] = 1
+ LOG.info(_("Renaming template {o!r} => {n!r} ...").format(o=tpl_name, n=new_name))
+ task = template.Rename_Task(new_name)
+ self.vsphere.wait_for_tasks([task])
+ LOG.debug(_("Successful renamed template into {!r}.").format(new_name))
+ else:
+ tname = tpl_name.strip().lower()
+ new_template_names[tname] = 1
+
+ print_section_end('rotate_templates')
+
+ # -------------------------------------------------------------------------
+ def rename_and_change_vm(self):
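+        """Rename the temporary VM to its final name and mark it as a VMWare template."""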
+
+ LOG.info(_("Renaming VM {o!r} => {n!r} ...").format(
+ o=self.tpl_vm_fqdn, n=self.cfg.template_name))
+ print_section_start(
+ 'rename_and_change_vm', "Renaming VM and mark as template ...", collapsed=True)
+
+ vm = self.get_temp_tpl_vm()
+ task = vm.Rename_Task(self.cfg.template_name)
+ self.vsphere.wait_for_tasks([task])
+ LOG.debug(_("Successful renamed VM into {!r}.").format(self.cfg.template_name))
+
+ LOG.info(_("Changing VM {!r} into a VMWare template ...").format(
+ self.cfg.template_name))
+ vm.MarkAsTemplate()
+ LOG.debug(_("Object {!r} is now a VMWare template.").format(self.cfg.template_name))
+ print_section_end('rename_and_change_vm')
# =============================================================================