bin_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.abspath(os.path.join(bin_dir, '..'))
lib_dir = os.path.join(base_dir, 'lib')
-module_dir = os.path.join(lib_dir, 'cr_tf')
+module_dir = os.path.join(lib_dir, 'create_terraform')
if os.path.exists(module_dir):
sys.path.insert(0, lib_dir)
from fb_tools.common import pp
-from cr_tf.app import CrTfApplication
+from create_terraform.app import CrTfApplication
log = logging.getLogger(__name__)
__author__ = 'Frank Brehm <frank.brehm@pixelpark.com>'
-__copyright__ = '(C) 2019 by Frank Brehm, Pixelpark GmbH, Berlin'
+__copyright__ = '(C) 2024 by Frank Brehm, Pixelpark GmbH, Berlin'
appname = os.path.basename(sys.argv[0])
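# A minimal sketch of a transitional shim, in case external scripts still
# import the old 'cr_tf' package name after the rename to 'create_terraform'.
# The shim file and its path are assumptions for illustration only and are
# not part of the change above.

# hypothetical lib/cr_tf/app.py, kept only until all callers are migrated:
from create_terraform.app import CrTfApplication  # noqa: F401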
+++ /dev/null
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-__version__ = '1.8.10'
-
-MIN_VERSION_TERRAFORM = '1.6.5'
-MAX_VERSION_TERRAFORM = '1.9.0'
-
-MIN_VERSION_VSPHERE_PROVIDER = '2.5.1'
-
-CFGFILE_BASENAME = 'create-terraform.ini'
-
-# vim: ts=4 et list
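# A short sketch of how the version bounds above can be checked against the
# output of "terraform version". It mirrors the LooseVersion handling and the
# re_tf_version pattern from the handler module further below; whether the
# upper bound is meant to be exclusive is an assumption here.

import re

from distutils.version import LooseVersion

MIN_VERSION_TERRAFORM = '1.6.5'
MAX_VERSION_TERRAFORM = '1.9.0'


def check_terraform_version(version_output):
    """Return the detected Terraform version or raise if it is out of bounds."""
    match = re.search(r'^\s*Terraform\s+v(\S+)', version_output, re.IGNORECASE | re.MULTILINE)
    if not match:
        raise RuntimeError('Could not detect the Terraform version.')
    version = LooseVersion(match.group(1))
    if version < LooseVersion(MIN_VERSION_TERRAFORM):
        raise RuntimeError('Terraform {} is too old, need at least {}.'.format(
            version, MIN_VERSION_TERRAFORM))
    if version >= LooseVersion(MAX_VERSION_TERRAFORM):
        raise RuntimeError('Terraform {} is not supported, need a version below {}.'.format(
            version, MAX_VERSION_TERRAFORM))
    return version


# Example: check_terraform_version('Terraform v1.7.5\non linux_amd64') -> LooseVersion('1.7.5')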
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: The module for the application object.
-"""
-from __future__ import absolute_import
-
-# Standard modules
-import sys
-import os
-import logging
-import re
-import argparse
-import signal
-
-from pathlib import Path
-
-# Third party modules
-from fb_tools.common import pp
-from fb_tools.app import BaseApplication
-from fb_tools.errors import ExpectedHandlerError, CommandNotFoundError
-from fb_tools.config import ConfigError
-from fb_tools.common import generate_password
-
-# Own modules
-from . import __version__ as __pkg_version__
-from . import CFGFILE_BASENAME
-
-from .errors import TerraformHandlerError
-
-from .config import CrTfConfiguration
-
-from .handler import CreateTerraformHandler
-
-from .terraform.vm import TerraformVm
-
-from .xlate import __module_dir__ as __xlate_module_dir__
-from .xlate import __base_dir__ as __xlate_base_dir__
-from .xlate import __mo_file__ as __xlate_mo_file__
-from .xlate import XLATOR, LOCALE_DIR, DOMAIN
-
-__version__ = '1.3.3'
-LOG = logging.getLogger(__name__)
-
-SIGNAL_NAMES = {
- signal.SIGHUP: 'HUP',
- signal.SIGINT: 'INT',
- signal.SIGABRT: 'ABRT',
- signal.SIGTERM: 'TERM',
- signal.SIGKILL: 'KILL',
- signal.SIGQUIT: 'QUIT',
- signal.SIGUSR1: 'USR1',
- signal.SIGUSR2: 'USR2',
-}
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CfgFileOptionAction(argparse.Action):
-
- # -------------------------------------------------------------------------
- def __init__(self, option_strings, *args, **kwargs):
-
- super(CfgFileOptionAction, self).__init__(
- option_strings=option_strings, *args, **kwargs)
-
- # -------------------------------------------------------------------------
- def __call__(self, parser, namespace, values, option_string=None):
-
- if values is None:
- setattr(namespace, self.dest, None)
- return
-
- path = Path(values)
- if not path.exists():
-            msg = _("File {!r} does not exist.").format(values)
- raise argparse.ArgumentError(self, msg)
- if not path.is_file():
- msg = _("File {!r} is not a regular file.").format(values)
- raise argparse.ArgumentError(self, msg)
-
- setattr(namespace, self.dest, path.resolve())
-
-
-# =============================================================================
-class YamlFileOptionAction(argparse.Action):
-
- # -------------------------------------------------------------------------
- def __init__(self, option_strings, *args, **kwargs):
-
- super(YamlFileOptionAction, self).__init__(
- option_strings=option_strings, *args, **kwargs)
-
- # -------------------------------------------------------------------------
- def __call__(self, parser, namespace, values, option_string=None):
-
- yaml_file_paths = []
-
- for value in values:
- path = Path(value)
- if not path.exists():
-                msg = _("File {!r} does not exist.").format(value)
- raise argparse.ArgumentError(self, msg)
- if not path.is_file():
-                msg = _("File {!r} is not a regular file.").format(value)
- raise argparse.ArgumentError(self, msg)
- yaml_file_paths.append(path.resolve())
-
- setattr(namespace, self.dest, yaml_file_paths)
-
-
-# =============================================================================
-class StopStepOptionAction(argparse.Action):
-
- # -------------------------------------------------------------------------
- def __init__(self, option_strings, *args, **kwargs):
-
- super(StopStepOptionAction, self).__init__(
- option_strings=option_strings, *args, **kwargs)
-
- # -------------------------------------------------------------------------
- def __call__(self, parser, namespace, values, option_string=None):
-
- step = values
- if step == '?':
- width = 1
- for step in CreateTerraformHandler.steps:
- if len(step) > width:
- width = len(step)
-
-            print("\n" + _("Execution can be interrupted after the following steps:"))
-
- for step in CreateTerraformHandler.steps:
- desc = _('<no description>')
- if step in CreateTerraformHandler.step_desc:
- desc = CreateTerraformHandler.step_desc[step]
- line = ' * {step:<{width}} {desc}'.format(
- step=step, width=width, desc=desc)
- print(line)
-
- print()
- sys.exit(0)
-
- setattr(namespace, self.dest, step)
-
-
-# =============================================================================
-class CrTfApplication(BaseApplication):
- """
- Class for the application objects.
- """
-
- show_simulate_option = True
-
- re_prefix = re.compile(r'^[a-z0-9][a-z0-9_]*$', re.IGNORECASE)
- re_anum = re.compile(r'[^A-Z0-9_]+', re.IGNORECASE)
- fake_root_passwd = generate_password(12)
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__pkg_version__, base_dir=None,
- terminal_has_colors=False, initialized=False, usage=None, description=None,
- argparse_epilog=None, argparse_prefix_chars='-', env_prefix=None):
-
- self.yaml_file = None
- self.config = None
- self.handler = None
- self._cfg_dir = None
- self._cfg_file = None
-
- desc = _(
- "Creates or updates a directory with a terraform environment "
-            "on the basis of a given YAML file.")
-
- super(CrTfApplication, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- description=desc, terminal_has_colors=terminal_has_colors, initialized=False,
- )
-
- # -------------------------------------------------------------------------
- @property
- def cfg_dir(self):
- """Directory of the configuration file."""
- return self._cfg_dir
-
- # -------------------------------------------------------------------------
- @property
- def cfg_file(self):
- """Configuration file."""
- return self._cfg_file
-
- # -------------------------------------------------------------------------
- def _search_cfg_file(self):
-
- search_dirs = []
- search_dirs.append(self.base_dir.parent)
- search_dirs.append(self.base_dir.parent / 'etc')
- search_dirs.append(self.base_dir / 'etc')
-
- for sdir in search_dirs:
- cfg_file = sdir / CFGFILE_BASENAME
- LOG.debug(_("Searching for config file {!r} ...").format(str(cfg_file)))
- if cfg_file.exists() and cfg_file.is_file():
- self._cfg_dir = sdir
- self._cfg_file = cfg_file
- return
- self._cfg_dir = self.base_dir / 'etc'
- self._cfg_file = self.base_dir.parent / CFGFILE_BASENAME
-
- # -------------------------------------------------------------------------
- def _warn_about_missing_cfg(self):
-
- cur_dir = Path.cwd()
-
- default_conf_file = self.cfg_dir / (CFGFILE_BASENAME + '.default')
- default_cfg_file_rel = os.path.relpath(str(default_conf_file), str(cur_dir))
-
- cfg1 = self.base_dir.parent / CFGFILE_BASENAME
- cfg1 = os.path.relpath(str(cfg1), str(cur_dir))
-
- cfg2 = self.base_dir.parent / 'etc' / CFGFILE_BASENAME
- cfg2 = os.path.relpath(str(cfg2), str(cur_dir))
-
- cfg3 = self.cfg_dir / CFGFILE_BASENAME
- cfg3 = os.path.relpath(str(cfg3), str(cur_dir))
-
- # cfg_file_rel = os.path.relpath(str(self.cfg_file), str(cur_dir))
- msg = (_(
- "Config file {f!r} not found, using defaults.\n"
- "To avoid this message, you may copy {d!r} to {c1!r}, {c2!r} or {c3!r} "
- "and fill out all necessary entries, e.g. the passwords and API keys.").format(
- f=CFGFILE_BASENAME, d=default_cfg_file_rel, c1=cfg1, c2=cfg2, c3=cfg3))
-        LOG.warning(msg)
-
- # -------------------------------------------------------------------------
- def _read_cfg(self):
-
- if self.cfg_file.exists():
- try:
- self.config.read()
- except ConfigError as e:
- LOG.error(_("Error in configuration:") + " " + str(e))
- self.exit(1)
- if self.verbose > 3:
- LOG.debug(_("Read configuration:") + '\n' + pp(self.config.as_dict()))
-
- # -------------------------------------------------------------------------
- def post_init(self):
- """
-        Method to execute before calling run(). Finishing actions after
-        reading the command line parameters, the configuration and so on
-        can be done here.
-
-        This method may be overridden by descendant classes; those methods
-        should always include a call to post_init() of the parent class.
- """
-
- self.initialized = False
-
- self.init_logging()
-
- self._search_cfg_file()
-
- self.perform_arg_parser()
-
- if not self.cfg_file.exists():
- self._warn_about_missing_cfg()
-
- self.config = CrTfConfiguration(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- config_file=self.cfg_file)
-
- self.config.init_vsphere_defaults()
-
- self._read_cfg()
-
- if self.config.verbose > self.verbose:
- self.verbose = self.config.verbose
- if self.config.simulate:
- self.simulate = True
-
- self.config.initialized = True
-
- if self.config.puppet_envs_delete:
- LOG.debug(_("Removing allowed puppet environments ..."))
- for env in self.config.puppet_envs_delete:
- if env in TerraformVm.valid_puppet_environments:
- if self.verbose > 1:
- LOG.debug(_("Removing puppet environment {!r} ...").format(env))
- TerraformVm.valid_puppet_environments.remove(env)
-
- if self.config.puppet_envs_add:
- LOG.debug(_("Adding allowed puppet environments ..."))
- for env in self.config.puppet_envs_add:
- if env not in TerraformVm.valid_puppet_environments:
- if self.verbose > 1:
- LOG.debug(_("Adding puppet environment {!r} ...").format(env))
- TerraformVm.valid_puppet_environments.append(env)
-
- TerraformVm.valid_puppet_environments.sort()
- LOG.debug(
- _("Allowed puppet environments:") + ' ' + pp(TerraformVm.valid_puppet_environments))
-
- self.perform_arg_parser_rest()
-
- if not self.config.pdns_api_key:
- url = 'http'
- if self.config.pdns_api_use_https:
- url += 's'
- url += '://' + self.config.pdns_master_server
- url += ':{}'.format(self.config.pdns_api_port)
- if self.config.pdns_api_path_prefix:
- url += self.config.pdns_api_path_prefix
- prompt = self.colored(_('PowerDNS API key for {!r}').format(url), 'AQUA')
- print('')
- self.config.pdns_api_key = self.get_secret(
- prompt=prompt, item_name=self.colored(_('PowerDNS API key'), 'AQUA'))
- print('')
-
-# if not self.config.vm_root_password:
-# # Using faked root password, because it is currently not used.
-# # TODO: When the root password is used, then substitute fake password
-# # by prompting for the real root password.
-# LOG.debug(_(
-# "Using faked root password {!r} - "
-# "but this is currently not used.").format(self.fake_root_passwd))
-# self.config.vm_root_password = self.fake_root_passwd
-
- self.handler = CreateTerraformHandler(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- simulate=self.simulate, force=self.force, config=self.config,
- terminal_has_colors=self.terminal_has_colors)
-
- if self.args.stop_after:
- self.handler.stop_at_step = self.args.stop_after
-
- self.handler.set_tz(self.config.tz_name)
-
- try:
- self.handler.init_handlers()
- except (CommandNotFoundError, ExpectedHandlerError) as e:
- LOG.error(str(e))
- self.exit(5)
- self.handler.initialized = True
-
- self.initialized = True
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(CrTfApplication, self).as_dict(short=short)
-
- res['cfg_dir'] = self.cfg_dir
- res['cfg_file'] = self.cfg_file
- res['__pkg_version__'] = __pkg_version__
- res['config'] = None
- if self.config:
- res['config'] = self.config.as_dict(short=short, show_secrets=self.force)
-
- if 'xlate' not in res:
- res['xlate'] = {}
- res['xlate'][DOMAIN] = {
- '__module_dir__': __xlate_module_dir__,
- '__base_dir__': __xlate_base_dir__,
- 'LOCALE_DIR': LOCALE_DIR,
- 'DOMAIN': DOMAIN,
- '__mo_file__': __xlate_mo_file__,
- }
-
- return res
-
- # -------------------------------------------------------------------------
- def init_arg_parser(self):
- """
-        Publicly available method to initialize the argument parser.
- """
-
- super(CrTfApplication, self).init_arg_parser()
-
- cur_dir = Path(os.getcwd())
-
- default_cfg_file = self.base_dir.joinpath('etc').joinpath(self.appname + '.ini')
- default_cfg_file_rel = Path(os.path.relpath(str(default_cfg_file), str(cur_dir)))
-
- steps = list(CreateTerraformHandler.steps[:]) + ['?']
-
- self.arg_parser.add_argument(
- '-S', '--stop-after', metavar=_('STEP'), dest='stop_after', choices=steps,
- action=StopStepOptionAction,
- help=_(
-                "Name of the step after which to interrupt the execution of this script. "
-                "Use {!r} to show a list of all available steps.").format('--stop-after ?')
- )
-
- self.arg_parser.add_argument(
- '-c', '--config', '--config-file', dest='cfg_file', metavar=_('FILE'),
- action=CfgFileOptionAction,
- help=_("Configuration file (default: {!r})").format(str(default_cfg_file_rel))
- )
-
- # PowerDNS options
- pdns_group = self.arg_parser.add_argument_group(_('PowerDNS options'))
-
- pdns_group.add_argument(
- '--no-pdns', action="store_true", dest='no_pdns',
- help=_(
-                "Don't execute any PowerDNS checks or actions. In this case it is up to you "
-                "to ensure the existence of all necessary IP addresses.")
- )
-
- pdns_group.add_argument(
- '-M', '--pdns-master', metavar=_("HOST"), dest='pdns_master',
- help=_(
- "The hostname or address of the PowerDNS master server "
- "(Default: {!r}).").format(CrTfConfiguration.default_pdns_master_server)
- )
-
- pdns_group.add_argument(
- '--api-port', metavar=_("PORT"), type=int, dest="pdns_api_port",
- help=_("The port number of the PowerDNS API (Default: {}).").format(
- CrTfConfiguration.default_pdns_api_port)
- )
-
- pdns_group.add_argument(
- '--api-key', metavar=_("KEY"), dest="pdns_api_key",
-            help=_("The key for accessing the PDNS API.")
- )
-
- pdns_group.add_argument(
- '--api-https', action="store_true", dest="pdns_api_https",
-            help=_("Execute PDNS API requests via HTTPS."),
- )
-
- pdns_group.add_argument(
- '--api-prefix', metavar=_("PATH"), dest='pdns_api_prefix',
- help=_("The path prefix in the URL for PDNS API requests (Default: {!r}).").format(
- CrTfConfiguration.default_pdns_api_path_prefix)
- )
-
- # Positional arguments
- self.arg_parser.add_argument(
- "yaml_file", nargs=1, metavar=_("YAML_FILE"), action=YamlFileOptionAction,
-            help=_("The YAML file with the definition of the VMs to create with terraform."),
- )
-
- # -------------------------------------------------------------------------
- def perform_arg_parser(self):
-
- if self.args.cfg_file:
- self._cfg_file = Path(self.args.cfg_file)
- if not self.cfg_file.is_absolute():
- self._cfg_file = self.cfg_file.resolve()
-
- # -------------------------------------------------------------------------
- def perform_arg_parser_rest(self):
- """
-        Publicly available method to execute some actions after parsing
-        the command line parameters.
- """
-
- self.perform_arg_parser_pdns()
-
- self.yaml_file = Path(self.args.yaml_file[0])
- if not self.yaml_file.is_absolute():
- self.yaml_file = self.yaml_file.resolve()
-
- # -------------------------------------------------------------------------
- def perform_arg_parser_pdns(self):
-
- if self.args.no_pdns:
- self.config.no_pdns = True
- if self.args.pdns_master:
- self.config.pdns_master_server = self.args.pdns_master
- if self.args.pdns_api_port:
- self.config.pdns_api_port = self.args.pdns_api_port
- if self.args.pdns_api_key:
- self.config.pdns_api_key = self.args.pdns_api_key
- if self.args.pdns_api_https:
- self.config.pdns_api_use_https = True
- if self.args.pdns_api_prefix:
- self.config.pdns_api_path_prefix = self.args.pdns_api_prefix
-
- # -------------------------------------------------------------------------
- def _run(self):
- """Main routine."""
-
- LOG.info(_("Starting {a!r}, version {v!r} ...").format(
- a=self.appname, v=__pkg_version__))
-
- try:
- if self.handler.first_call(self.yaml_file):
- self.verify_vsphere_credentials()
- self.handler(self.yaml_file)
- except ExpectedHandlerError as e:
- self.handler = None
- self.handle_error(str(e), _("Create Terraform environment"))
- self.exit(5)
-
- # -------------------------------------------------------------------------
- def verify_vsphere_credentials(self):
-
- if not self.handler:
- raise TerraformHandlerError(_("No handler object available."))
-
- need_nl = False
-
- if not self.handler.vsphere_user:
-
- need_nl = True
- msg = '\n' + _("Please input the {}.").format(self.colored(
- _('vSphere user name'), 'AQUA'))
- print(msg)
- self.handler.vsphere_user = input(self.colored(_('Name'), 'AQUA') + ': ')
- if not self.handler.vsphere_user:
- msg = _("No {} given.").format(_('vSphere user name'))
- raise ExpectedHandlerError(msg)
-
- for vname in self.handler.vsphere.keys():
- LOG.debug(_("Setting user for vSphere {vs!r} to {usr!r}.").format(
- vs=vname, usr=self.handler.vsphere_user))
-            # Dirty, but otherwise a change to fb_tools would be necessary (later)
- self.handler.vsphere[vname]._user = self.handler.vsphere_user
- print('')
- need_nl = False
-
- if not self.handler.vsphere_password:
-
- # Get the name of the first (and hopefully only) VSphere
- vname = None
- for vn in self.handler.vsphere.keys():
- vname = vn
- break
-
- if need_nl:
- print('')
- prompt = self.colored(_("User password of {!r}").format(
- self.handler.vsphere_user), 'AQUA')
- item = _('Password for user {u!r} of vSphere {n} on {h!r}').format(
- u=self.handler.vsphere_user, n=vname, h=self.config.vsphere[vname].host)
- item = self.colored(item, 'AQUA')
- self.handler.vsphere_password = self.get_secret(prompt=prompt, item_name=item)
- if not self.handler.vsphere_password:
- msg = _("No {} given.").format(_('password of vSphere user'))
- raise ExpectedHandlerError(msg)
-
- for vname in self.handler.vsphere.keys():
-                LOG.debug(_("Setting password of vSphere {vs!r} user {usr!r}.").format(
- vs=vname, usr=self.handler.vsphere_user))
-                # Dirty, but otherwise a change to fb_tools would be necessary (later)
- self.handler.vsphere[vname]._password = self.handler.vsphere_password
- print('')
- need_nl = False
-
- if need_nl:
- print('')
-
- # -------------------------------------------------------------------------
- def post_run(self):
- """
- Dummy function to run after the main routine.
-        Could be overridden by descendant classes.
-
- """
-
- if self.verbose > 1:
- LOG.info(_("Executing {} ...").format('post_run()'))
-
- if self.handler:
- self.handler = None
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
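# A self-contained sketch of the custom argparse.Action pattern used above for
# the --config option: validate the given path while parsing and store it as a
# resolved Path object. The names here are illustrative and not part of the
# module above.

import argparse
from pathlib import Path


class ExistingFileAction(argparse.Action):
    """Store the argument as a resolved Path and reject non-files early."""

    def __call__(self, parser, namespace, values, option_string=None):
        path = Path(values)
        if not path.is_file():
            raise argparse.ArgumentError(self, 'File {!r} does not exist.'.format(values))
        setattr(namespace, self.dest, path.resolve())


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', metavar='FILE', action=ExistingFileAction)
    print(parser.parse_args().config)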
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A module for providing a configuration
-"""
-from __future__ import absolute_import
-
-# Standard module
-import logging
-import re
-
-# Third party modules
-import pytz
-
-from fb_tools.config import BaseConfiguration
-
-from fb_tools.common import to_bool, RE_FQDN, pp
-
-from fb_pdnstools import DEFAULT_PORT as DEFAULT_PDNS_API_PORT
-from fb_pdnstools import DEFAULT_TIMEOUT as DEFAULT_PDNS_API_TIMEOUT
-from fb_pdnstools import DEFAULT_API_PREFIX as DEFAULT_PDNS_API_PREFIX
-from fb_pdnstools import DEFAULT_USE_HTTPS as DEFAULT_PDNS_API_USE_HTTPS
-
-# Own modules
-
-from .errors import CrTfConfigError
-
-from .vs_config import VsphereConfig
-
-from .xlate import XLATOR
-
-__version__ = '1.9.0'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfConfiguration(BaseConfiguration):
- """
- A class for providing a configuration for the CrTfApplication class
- and methods to read it from configuration files.
- """
-
- default_pdns_master_server = 'master.pp-dns.com'
- default_pdns_api_port = DEFAULT_PDNS_API_PORT
- default_pdns_api_use_https = bool(DEFAULT_PDNS_API_USE_HTTPS)
- default_pdns_api_path_prefix = DEFAULT_PDNS_API_PREFIX
-    default_pdns_api_timeout = DEFAULT_PDNS_API_TIMEOUT
- default_pdns_comment_account = 'provisioning'
-
- default_rhsm_user = 'dpx-subscriber'
-
- default_vsphere_defs = {
- 'live': {
- 'host': 'vcs01.ppbrln.internal',
- 'port': 443,
- 'dc': 'vmcc',
- 'cluster': 'vmcc-l105-01',
- },
- 'test': {
- 'host': 'test-vcsa01.pixelpark.net',
- 'port': 443,
- 'dc': 'test-vmcc',
- 'cluster': 'test-vmcc-l105-01',
- },
- }
-
- default_vsphere_tag_cat_os_id = 'OS'
- default_vsphere_tag_cat_os_name = 'OS'
- default_vsphere_tag_cat_os_desc = 'The operating system of a VM.'
-
- default_vsphere_tag_os_rhel_id = 'os_rhel'
- default_vsphere_tag_os_rhel_name = 'rhel'
- default_vsphere_tag_os_rhel_desc = 'RedHat Enterprise Linux'
-
- default_min_root_size_gb = 32.0
- default_tz_name = 'Europe/Berlin'
- default_guest_id = "oracleLinux7_64Guest"
-
- default_disk_size = 10.0
- default_root_min_size = 20.0
- default_root_max_size = 512.0
- default_disk_min_size = 4.0
- default_disk_max_size = 1024.0
-
- default_tf_backend_host = 'terraform.pixelpark.com'
- default_tf_backend_scheme = 'https'
- default_tf_backend_path_prefix = 'terraform'
-
- re_list_split = re.compile(r'\s*[,:\s]+\s*')
-
- default_puppetmaster = 'puppetmaster03.pixelpark.com'
- default_puppetca = 'puppetca01.pixelpark.com'
-
- msg_invalid_type = _("Invalid value {v!r} for {n!r} configuration ({f!r}:[{s}]): {e}")
- msg_val_negative = _(
- "Invalid value {v} for {n!r} configuration ({f!r}:[{s}]): "
- "must be equal or greater than zero.")
-
- max_pdns_api_timeout = 3600
-
- re_excl_ds = re.compile(r'^\s*excluded?[-_]datastores?\s*$', re.IGNORECASE)
- re_split_ds = re.compile(r'[,;\s]+')
- re_template = re.compile(r'^\s*template(?:[-_\.]?name)?\s*$', re.IGNORECASE)
- re_min_root_size = re.compile(
- r'^\s*min[-_\.]?root[-_\.]?size(?:[-_\.]?gb)\s*$', re.IGNORECASE)
- re_guest_id = re.compile(r'^\s*guest[-_]?id\s*$', re.IGNORECASE)
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None, simulate=False,
- encoding=None, config_dir=None, config_file=None, initialized=False):
-
- self.pdns_master_server = self.default_pdns_master_server
- self.pdns_api_port = self.default_pdns_api_port
- self._pdns_api_key = None
- self._pdns_api_use_https = self.default_pdns_api_use_https
- self._pdns_api_timeout = self.default_pdns_api_timeout
- self.pdns_api_path_prefix = self.default_pdns_api_path_prefix
- self.min_root_size_gb = self.default_min_root_size_gb
- self._vm_root_password = None
- self.tz_name = self.default_tz_name
- self.guest_id = self.default_guest_id
- self.puppetmaster = self.default_puppetmaster
- self.puppetca = self.default_puppetca
- self.pdns_comment_account = self.default_pdns_comment_account
- self._rhsm_user = self.default_rhsm_user
- self._rhsm_password = None
-
- self._vsphere_tag_cat_os_id = self.default_vsphere_tag_cat_os_id
- self._vsphere_tag_cat_os_name = self.default_vsphere_tag_cat_os_name
- self._vsphere_tag_cat_os_desc = self.default_vsphere_tag_cat_os_desc
-
- self._vsphere_tag_os_rhel_id = self.default_vsphere_tag_os_rhel_id
- self._vsphere_tag_os_rhel_name = self.default_vsphere_tag_os_rhel_name
- self._vsphere_tag_os_rhel_desc = self.default_vsphere_tag_os_rhel_desc
-
- self._no_pdns = False
-
- self.puppet_envs_add = set()
- self.puppet_envs_delete = set()
-
- self.vsphere = {}
-
- self._disk_size = self.default_disk_size
-
- self._root_min_size = self.default_root_min_size
- self._root_max_size = self.default_root_max_size
- self._disk_min_size = self.default_disk_min_size
- self._disk_max_size = self.default_disk_max_size
-
- self.tf_backend_host = self.default_tf_backend_host
- self.tf_backend_scheme = self.default_tf_backend_scheme
- self.tf_backend_path_prefix = self.default_tf_backend_path_prefix
-
- self._simulate = False
-
- self.excluded_datastores = []
-
- super(CrTfConfiguration, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- encoding=encoding, config_dir=config_dir, config_file=config_file, initialized=False,
- )
-
- self.simulate = simulate
-
- if initialized:
- self.initialized = True
-
- # -----------------------------------------------------------
- @property
- def simulate(self):
-        """A flag indicating that all actions should only be simulated."""
- return self._simulate
-
- @simulate.setter
- def simulate(self, value):
- self._simulate = to_bool(value)
-
- # -----------------------------------------------------------
- @property
- def no_pdns(self):
- """Don't execute some PowerDNS actions or checks."""
- return self._no_pdns
-
- @no_pdns.setter
- def no_pdns(self, value):
- self._no_pdns = to_bool(value)
-
- # -----------------------------------------------------------
- @property
- def pdns_api_key(self):
- """The key used to authenticate against the PowerDNS API."""
- return self._pdns_api_key
-
- @pdns_api_key.setter
- def pdns_api_key(self, value):
- if value is None:
- self._pdns_api_key = None
- return
- val = str(value)
- if val == '':
- self._pdns_api_key = None
- else:
- self._pdns_api_key = val
-
- # -----------------------------------------------------------
- @property
- def pdns_api_use_https(self):
-        """Whether HTTPS should be used for PDNS API calls."""
- return self._pdns_api_use_https
-
- @pdns_api_use_https.setter
- def pdns_api_use_https(self, value):
- self._pdns_api_use_https = to_bool(value)
-
- # -----------------------------------------------------------
- @property
- def pdns_api_timeout(self):
- """The timeout in seconds for requesting the PowerDNS API."""
- return self._pdns_api_timeout
-
- @pdns_api_timeout.setter
- def pdns_api_timeout(self, value):
- if value is None:
- self._pdns_api_timeout = self.default_pdns_api_timeout
- return
- val = int(value)
- err_msg = _(
- "Invalid timeout {t!r} for requesting the PowerDNS API, "
-            "must be 0 < SECONDS <= {m}.")
- if val <= 0 or val > self.max_pdns_api_timeout:
- msg = err_msg.format(t=value, m=self.max_pdns_api_timeout)
- raise ValueError(msg)
- self._pdns_api_timeout = val
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_cat_os_id(self):
- """The terraform ID of the VSphere tag category 'OS'."""
- return self._vsphere_tag_cat_os_id
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_cat_os_name(self):
- """The name of the VSphere OS tag category."""
- return self._vsphere_tag_cat_os_name
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_cat_os_desc(self):
- """The description of the VSphere OS tag category."""
- return self._vsphere_tag_cat_os_desc
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_os_rhel_id(self):
- """The terraform ID of the VSphere tag for OS RHEL."""
- return self._vsphere_tag_os_rhel_id
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_os_rhel_name(self):
- """The name of the VSphere tag for OS RHEL."""
- return self._vsphere_tag_os_rhel_name
-
- # -----------------------------------------------------------
- @property
- def vsphere_tag_os_rhel_desc(self):
- """The description of the VSphere tag for OS RHEL."""
- return self._vsphere_tag_os_rhel_desc
-
- # -----------------------------------------------------------
- @property
- def vm_root_password(self):
-        """The root password of the created VMs."""
- return self._vm_root_password
-
- @vm_root_password.setter
- def vm_root_password(self, value):
- if value is None:
- self._vm_root_password = None
- return
- val = str(value)
- if val == '':
- self._vm_root_password = None
- else:
- self._vm_root_password = val
-
- # -----------------------------------------------------------
- @property
- def disk_size(self):
- """Default data disk size in GiB."""
- return self._disk_size
-
- @disk_size.setter
- def disk_size(self, value):
- if value is None:
- msg = _("The default size of the data disk may not be None.")
- raise TypeError(msg)
- val = float(value)
- if val < 1:
-            msg = _("The default size of the data disk must be at least one GB.")
- raise ValueError(msg)
- self._disk_size = val
-
- # -----------------------------------------------------------
- @property
- def disk_min_size(self):
- """Minimal data disk size in GiB."""
- return self._disk_min_size
-
- @disk_min_size.setter
- def disk_min_size(self, value):
- if value is None:
- msg = _("The minimal size of the data disk may not be None.")
- raise TypeError(msg)
- val = float(value)
- if val < 1:
-            msg = _("The minimal size of the data disk must be at least one GB.")
- raise ValueError(msg)
- self._disk_min_size = val
-
- # -----------------------------------------------------------
- @property
- def disk_max_size(self):
- """Maximal data disk size in GiB."""
- return self._disk_max_size
-
- @disk_max_size.setter
- def disk_max_size(self, value):
- if value is None:
- msg = _("The maximal size of the data disk may not be None.")
- raise TypeError(msg)
- val = float(value)
- if val < 1:
-            msg = _("The maximal size of the data disk must be at least one GB.")
- raise ValueError(msg)
- self._disk_max_size = val
-
- # -----------------------------------------------------------
- @property
- def root_min_size(self):
- """Minimal root disk size in GiB."""
- return self._root_min_size
-
- @root_min_size.setter
- def root_min_size(self, value):
- if value is None:
- msg = _("The minimal size of the root disk may not be None.")
- raise TypeError(msg)
- val = float(value)
- if val < 1:
-            msg = _("The minimal size of the root disk must be at least one GB.")
- raise ValueError(msg)
- self._root_min_size = val
-
- # -----------------------------------------------------------
- @property
- def root_max_size(self):
- """Maximal root disk size in GiB."""
- return self._root_max_size
-
- @root_max_size.setter
- def root_max_size(self, value):
- if value is None:
- msg = _("The maximal size of the root disk may not be None.")
- raise TypeError(msg)
- val = float(value)
- if val < 1:
-            msg = _("The maximal size of the root disk must be at least one GB.")
- raise ValueError(msg)
- self._root_max_size = val
-
- # -----------------------------------------------------------
- @property
- def rhsm_user(self):
- """The user used for subscribing the VM at RedHat."""
- return self._rhsm_user
-
- @rhsm_user.setter
- def rhsm_user(self, value):
- if value is None:
- self._rhsm_user = self.default_rhsm_user
- return
- val = str(value).strip()
- if val == '':
- self._rhsm_user = self.default_rhsm_user
- else:
- self._rhsm_user = val
-
- # -----------------------------------------------------------
- @property
- def rhsm_password(self):
- """The password of the user used for subscribing the VM at RedHat."""
- return self._rhsm_password
-
- @rhsm_password.setter
- def rhsm_password(self, value):
- if value is None:
- msg = _("The password of the user used for subscribing at RedHat may not be None.")
- raise CrTfConfigError(msg)
- val = str(value).strip()
- if val == '':
- msg = _("The password of the user used for subscribing at RedHat may not be empty.")
- raise CrTfConfigError(msg)
- self._rhsm_password = val
-
- # -------------------------------------------------------------------------
- def init_vsphere_defaults(self):
-
- for vname in self.default_vsphere_defs.keys():
-
- vs_data = self.default_vsphere_defs[vname]
-
- params = {
- 'appname': self.appname,
- 'verbose': self.verbose,
- 'base_dir': self.base_dir,
- 'name': vname,
- 'host': vs_data['host'],
- 'port': vs_data['port'],
- 'dc': vs_data['dc'],
- 'cluster': vs_data['cluster'],
- }
-
- if self.verbose > 2:
- msg = _("Creating a {}-object with parameters:").format('VsphereConfig')
- msg += '\n' + pp(params)
- LOG.debug(msg)
- vsphere = VsphereConfig(**params)
- if self.verbose > 2:
- LOG.debug(_("Created object:") + '\n' + pp(vsphere.as_dict()))
-
- self.vsphere[vname] = vsphere
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True, show_secrets=False):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
-        @type short: bool
-        @param show_secrets: include secret data like passwords and API keys in the result.
-        @type show_secrets: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(CrTfConfiguration, self).as_dict(short=short)
-
- res['simulate'] = self.simulate
- res['no_pdns'] = self.no_pdns
- res['pdns_api_use_https'] = self.pdns_api_use_https
- res['pdns_api_timeout'] = self.pdns_api_timeout
- res['vm_root_password'] = None
- res['pdns_api_key'] = None
- res['disk_size'] = self.disk_size
- res['disk_min_size'] = self.disk_min_size
- res['disk_max_size'] = self.disk_max_size
- res['root_min_size'] = self.root_min_size
- res['root_max_size'] = self.root_max_size
- res['rhsm_user'] = self.rhsm_user
- res['vsphere_tag_cat_os_id'] = self.vsphere_tag_cat_os_id
- res['vsphere_tag_cat_os_name'] = self.vsphere_tag_cat_os_name
- res['vsphere_tag_cat_os_desc'] = self.vsphere_tag_cat_os_desc
- res['vsphere_tag_os_rhel_id'] = self.vsphere_tag_os_rhel_id
- res['vsphere_tag_os_rhel_name'] = self.vsphere_tag_os_rhel_name
- res['vsphere_tag_os_rhel_desc'] = self.vsphere_tag_os_rhel_desc
-
- res['vsphere'] = {}
- for vsphere_name in self.vsphere.keys():
- res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(
- short=short, show_secrets=show_secrets)
-
- if self.pdns_api_key:
- if show_secrets or self.verbose > 4:
- res['pdns_api_key'] = self.pdns_api_key
- else:
- res['pdns_api_key'] = '*******'
-
- if self.vm_root_password:
- if show_secrets or self.verbose > 4:
- res['vm_root_password'] = self.vm_root_password
- else:
- res['vm_root_password'] = '*******'
-
- if self.rhsm_password:
- if show_secrets or self.verbose > 4:
- res['rhsm_password'] = self.rhsm_password
- else:
- res['rhsm_password'] = '*******'
- else:
- res['rhsm_password'] = None
-
- return res
-
- # -------------------------------------------------------------------------
- def eval_config_section(self, config, section_name):
-        """Evaluate all found configuration options."""
-
- if self.verbose > 2:
- msg = _("Checking config section {!r}:").format(section_name)
- LOG.debug(msg)
-
- super(CrTfConfiguration, self).eval_config_section(config, section_name)
-
- sn = section_name.lower()
-
- if sn == 'vsphere' or sn.startswith('vsphere:'):
- if sn.startswith('vsphere:'):
- vsphere_name = sn.replace('vsphere:', '').strip()
- if vsphere_name == '':
-                    LOG.error(_("Empty vSphere name found."))
- else:
- self.eval_config_vsphere(config, section_name, vsphere_name)
- else:
- self.eval_config_vsphere(config, section_name, '_default')
- elif sn == 'powerdns' or sn == 'pdns':
- self.eval_config_pdns(config, section_name)
- elif sn == 'terraform':
- self.eval_config_terraform(config, section_name)
-
- # -------------------------------------------------------------------------
- def eval_config_global(self, config, section_name):
- """Evaluating section [global] of configuration.
- May be overridden in descendant classes."""
-
- super(CrTfConfiguration, self).eval_config_global(
- config=config, section_name=section_name)
-
- re_tz = re.compile(r'^\s*(?:tz|time[_-]?zone)\s*$', re.IGNORECASE)
- re_puppetmaster = re.compile(r'^\s*puppet[_-]?master\s*$', re.IGNORECASE)
- re_puppetca = re.compile(r'^\s*puppet[_-]?ca\s*$', re.IGNORECASE)
- re_rhsm_user = re.compile(r'^\s*rhsm[_-]?user\s*$', re.IGNORECASE)
- re_rhsm_password = re.compile(r'^\s*rhsm[_-]?password\s*$', re.IGNORECASE)
-
- for (key, value) in config.items(section_name):
- if key.lower() == 'simulate':
- self.simulate = value
- elif re_tz.search(key) and value.strip():
- val = value.strip()
- try:
- tz = pytz.timezone(val) # noqa
- except pytz.exceptions.UnknownTimeZoneError as e:
- raise CrTfConfigError(self.msg_invalid_type.format(
- f=self.config_file, s=section_name, v=value, n='time_zone', e=e))
- self.tz_name = value.strip()
- elif re_puppetmaster.search(key) and value.strip():
- val = value.strip()
- if not RE_FQDN.search(val):
- raise CrTfConfigError(self.msg_invalid_type.format(
- f=self.config_file, s=section_name, v=value, n='puppet_master',
- e='Invalid Host FQDN for puppetmaster'))
- self.puppetmaster = val.lower()
- elif re_puppetca.search(key) and value.strip():
- val = value.strip()
- if not RE_FQDN.search(val):
- raise CrTfConfigError(self.msg_invalid_type.format(
- f=self.config_file, s=section_name, v=value, n='puppet_ca',
- e='Invalid Host FQDN for puppetca'))
- self.puppetca = val.lower()
- elif re_rhsm_user.search(key) and value.strip():
- self.rhsm_user = value.strip()
- elif re_rhsm_password.search(key) and value.strip():
- self.rhsm_password = value.strip()
-
- # -------------------------------------------------------------------------
- def eval_config_vsphere(self, config, section_name, vsphere_name):
-
- if self.verbose > 2:
- LOG.debug(_("Checking config section {s!r} ({n}) ...").format(
- s=section_name, n=vsphere_name))
-
- if vsphere_name in self.vsphere:
- self.eval_config_existing_vsphere(config, section_name, vsphere_name)
- else:
- self.eval_config_new_vsphere(config, section_name, vsphere_name)
-
- # -------------------------------------------------------------------------
- def eval_config_new_vsphere(self, config, section_name, vsphere_name):
-
- params = {
- 'appname': self.appname,
- 'verbose': self.verbose,
- 'base_dir': self.base_dir,
- 'name': vsphere_name,
- }
-
- for (key, value) in config.items(section_name):
-
- if key.lower() == 'host' and value.strip():
- params['host'] = value.strip()
- elif key.lower() == 'port':
- params['port'] = value
- elif key.lower() == 'user' and value.strip():
- params['user'] = value.strip()
- elif key.lower() == 'password':
- params['password'] = value
- elif key.lower() == 'dc' and value.strip():
- params['dc'] = value.strip()
- elif key.lower() == 'cluster' and value.strip():
- params['cluster'] = value.strip()
- elif self.re_template.search(key) and value.strip():
- params['template_name'] = value.strip()
- elif self.re_excl_ds.search(key) and value.strip():
- datastores = self.re_split_ds.split(value.strip())
- params['excluded_ds'] = datastores
- elif self.re_min_root_size.search(key) and value.strip():
- params['min_root_size_gb'] = value
- elif self.re_guest_id.search(key) and value.strip():
- params['guest_id'] = value.strip()
- else:
- msg = _(
-                    "Unknown configuration parameter {k!r} with value {v!r} for vSphere {n!r} "
- "found.").format(k=key, v=value, n=vsphere_name)
- LOG.warning(msg)
-
- if self.verbose > 2:
- msg = _("Creating a {}-object with parameters:").format('VsphereConfig')
- msg += '\n' + pp(params)
- LOG.debug(msg)
- vsphere = VsphereConfig(**params)
- if self.verbose > 2:
- LOG.debug(_("Created object:") + '\n' + pp(vsphere.as_dict()))
-
- vsphere.is_valid(raise_on_error=True)
-
- self.vsphere[vsphere_name] = vsphere
-
- return
-
- # -------------------------------------------------------------------------
- def eval_config_existing_vsphere(self, config, section_name, vsphere_name):
-
- vsphere = self.vsphere[vsphere_name]
-
- for (key, value) in config.items(section_name):
-
- if key.lower() == 'host' and value.strip():
- vsphere.host = value.strip()
- elif key.lower() == 'port':
- vsphere.port = value
- elif key.lower() == 'user' and value.strip():
- vsphere.user = value.strip()
- elif key.lower() == 'password':
- vsphere.password = value
- elif key.lower() == 'dc' and value.strip():
- vsphere.dc = value.strip()
- elif key.lower() == 'cluster' and value.strip():
- vsphere.cluster = value.strip()
- elif self.re_template.search(key) and value.strip():
- vsphere.template_name = value.strip()
- elif self.re_excl_ds.search(key) and value.strip():
- datastores = self.re_split_ds.split(value.strip())
-                vsphere.excluded_ds = datastores
- elif self.re_min_root_size.search(key) and value.strip():
- vsphere.min_root_size_gb = value.strip()
- elif self.re_guest_id.search(key) and value.strip():
- vsphere.guest_id = value.strip()
- else:
- msg = _(
-                    "Unknown configuration parameter {k!r} with value {v!r} for vSphere {n!r} "
- "found.").format(k=key, v=value, n=vsphere_name)
- LOG.warning(msg)
-
- if self.verbose > 2:
- LOG.debug(_("Updated object:") + '\n' + pp(vsphere.as_dict()))
-
- vsphere.is_valid(raise_on_error=True)
-
- self.vsphere[vsphere_name] = vsphere
-
- return
-
- # -------------------------------------------------------------------------
- def eval_config_pdns(self, config, section):
-
- if self.verbose > 2:
- LOG.debug(_("Checking config section {!r} ...").format(section))
-
- re_master = re.compile(
- r'^\s*(?:master(?:[-_\.]?server)?|api(?:[-_\.]?(?:host|server)))\s*$', re.IGNORECASE)
- re_port = re.compile(r'^\s*(?:api[-_\.]?)?port\s*$', re.IGNORECASE)
- re_key = re.compile(r'^\s*(?:api[-_\.]?)?key\s*$', re.IGNORECASE)
- re_use_https = re.compile(r'^\s*(?:api[-_\.]?)?(?:use[-_\.]?)?https\s*$', re.IGNORECASE)
- re_prefix = re.compile(r'^\s*(?:api[-_\.]?)?(?:path[-_\.]?)?prefix\s*$', re.IGNORECASE)
- re_comment_account = re.compile(r'^\s*comment[-_\.]?account\s*$', re.IGNORECASE)
-
- for (key, value) in config.items(section):
- if re_master.search(key) and value.strip():
- self.pdns_master_server = value.strip().lower()
- elif re_port.search(key) and value.strip():
- val = 0
- try:
- val = int(value.strip())
- except ValueError as e:
- raise CrTfConfigError(self.msg_invalid_type.format(
- f=self.config_file, s=section, v=value, n=key, e=e))
- if val < 0:
- raise CrTfConfigError(self.msg_val_negative.format(
- f=self.config_file, s=section, v=value, n=key))
- self.pdns_api_port = val
- elif re_key.search(key) and value.strip():
- self.pdns_api_key = value.strip()
- elif re_use_https.search(key):
- self.pdns_api_use_https = value
- elif re_prefix.search(key) and value.strip():
- self.pdns_api_path_prefix = value.strip()
- elif key.lower() == 'timeout' and value.strip():
- self.pdns_api_timeout = value.strip()
- elif re_comment_account.search(key) and value.strip():
- self.pdns_comment_account = value.strip()
-
- return
-
- # -------------------------------------------------------------------------
- def eval_config_terraform(self, config, section):
-
- if self.verbose > 2:
- LOG.debug(_("Checking config section {!r} ...").format(section))
-
- re_root_pw = re.compile(r'^\s*root[_-]?passw(?:ord)?\s*$', re.IGNORECASE)
-
- re_disk_size = re.compile(r'^\s*(?:data[_-]?)?disk[_-]?size\s*$', re.IGNORECASE)
-
- re_disk_min_size = re.compile(
- r'^\s*(?:data[_-]?)?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
- re_disk_max_size = re.compile(
- r'^\s*(?:data[_-]?)?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
- re_root_disk_min_size = re.compile(
- r'^\s*root[_-]?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
- re_root_disk_max_size = re.compile(
- r'^\s*root[_-]?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
-
- re_backend_host = re.compile(r'^\s*backend[_-]?host\s*$', re.IGNORECASE)
- re_backend_scheme = re.compile(r'^\s*backend[_-]?scheme\s*$', re.IGNORECASE)
- re_backend_path_prefix = re.compile(r'^\s*backend[_-]?path[_-]?prefix\s*$', re.IGNORECASE)
-
- # re_list_split
- re_puppet_envs = re.compile(r'^\s*puppet[_-]?env(?:ironment)?s?\s*$', re.IGNORECASE)
- re_puppet_env = re.compile(r'^([+-])?([a-z](?:[a-z0-9_]*[a-z0-9])?)$', re.IGNORECASE)
-
- for (key, value) in config.items(section):
- if re_root_pw.search(key) and value.strip():
- self.vm_root_password = value
- elif re_disk_size.search(key):
- self.disk_size = value
- elif re_disk_min_size.search(key):
- self.disk_min_size = value
- elif re_disk_max_size.search(key):
- self.disk_max_size = value
- elif re_root_disk_min_size.search(key):
- self.root_min_size = value
- elif re_root_disk_max_size.search(key):
- self.root_max_size = value
- elif re_backend_host.search(key) and value.strip():
- self.tf_backend_host = value.strip().lower()
- elif re_backend_scheme.search(key) and value.strip():
- self.tf_backend_scheme = value.strip().lower()
- elif re_backend_path_prefix.search(key) and value.strip():
- self.tf_backend_path_prefix = value.strip()
- elif re_puppet_envs.search(key) and value.strip():
- v = value.strip()
- env_list = self.re_list_split.split(v)
- for env in env_list:
- match = re_puppet_env.match(env)
- if not match:
- msg = _("Invalid puppet environment {env!r} found in {k!r}.").format(
- env=env, k=key)
-                        LOG.warning(msg)
- continue
- sign = match.group(1)
- val = match.group(2).lower()
- if sign == '-':
- self.puppet_envs_delete.add(val)
- else:
- self.puppet_envs_add.add(val)
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
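# A sketch of a create-terraform.ini that exercises the sections and keys the
# configuration class above recognizes ([global], [pdns]/[powerdns],
# [vsphere:NAME] and [terraform]). The concrete values are examples only; the
# key spellings follow the regular expressions in this module.

import configparser

EXAMPLE_INI = """
[global]
simulate = no
time_zone = Europe/Berlin
rhsm_user = dpx-subscriber

[pdns]
master_server = master.pp-dns.com
use_https = yes
timeout = 20

[vsphere:test]
host = test-vcsa01.pixelpark.net
dc = test-vmcc
cluster = test-vmcc-l105-01

[terraform]
disk_size = 10
root_disk_min_size = 20
puppet_environments = +development, -testing
"""

parser = configparser.ConfigParser()
parser.read_string(EXAMPLE_INI)
print(parser.sections())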
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@summary: A module for some commonly used error classes
-"""
-from __future__ import absolute_import
-
-# Standard modules
-
-
-# Own modules
-from fb_tools.errors import FbHandlerError, ExpectedHandlerError
-
-from fb_tools.config import ConfigError
-
-from .xlate import XLATOR
-
-__version__ = '1.3.0'
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class TerraformObjectError(FbHandlerError):
-    """Exception class for errors while evaluating the VM definition for Terraform."""
-
- pass
-
-
-# =============================================================================
-class TerraformHandlerError(TerraformObjectError):
- """Exception because of handler errors."""
-
- pass
-
-
-# =============================================================================
-class TerraformVSphereError(TerraformObjectError):
- """Exception because of VSphere configuration errors."""
-
- pass
-
-
-# =============================================================================
-class TerraformVmError(TerraformObjectError):
-    """Exception class for errors while evaluating the VM definition for Terraform."""
-
- pass
-
-
-# =============================================================================
-class TerraformVmDefinitionError(TerraformVmError):
-    """Exception class for errors while evaluating the VM definition for Terraform."""
-
- pass
-
-
-# =============================================================================
-class TerraformVmTooManyDisksError(TerraformVmDefinitionError):
-    """Exception class for the case that too many disks are to be attached to a VM."""
-
- # -------------------------------------------------------------------------
- def __init__(self, given_disks, max_disks=60):
- """Initiate this exception class."""
- self.given_disks = int(given_disks)
- self.max_disks = int(max_disks)
-
- # -------------------------------------------------------------------------
- def __str__(self):
- """Typecast into a string."""
- msg = _(
-            "Too many disks ({gd}) are to be assigned to a VM. "
-            "At most {maxd} disks may be assigned to a VM.").format(
- gd=self.given_disks, maxd=self.max_disks)
- return msg
-
-
-# =============================================================================
-class NetworkNotExistingError(ExpectedHandlerError):
-    """Special error class for the case that the expected network does not exist."""
-
- # -------------------------------------------------------------------------
- def __init__(self, net_name):
-
- self.net_name = net_name
-
- # -------------------------------------------------------------------------
- def __str__(self):
-
-        msg = _("The network {!r} does not exist.").format(self.net_name)
- return msg
-
-
-# =============================================================================
-class CrTfConfigError(ConfigError):
-    """Base error class for all exceptions that happen during
-    evaluation of the configuration."""
-
- pass
-
-
-# =============================================================================
-class AbortExecution(ExpectedHandlerError):
- """Indicating an abort of the execution."""
-
- # -------------------------------------------------------------------------
- def __init__(self, step=None):
-
- if step:
- self.step = step
- else:
- self.step = _('<some unknown step>')
-
- # -------------------------------------------------------------------------
- def __str__(self):
-
- return _("Aborting after {!r}.").format(self.step)
-
-
-# =============================================================================
-
-if __name__ == "__main__":
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
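# A small, self-contained sketch of how AbortExecution is intended to work
# together with the --stop-after mechanism: a handler raises it once the
# configured step has finished, and the caller handles it like any other
# ExpectedHandlerError. The stand-in classes below are illustrative only.


class DemoExpectedHandlerError(Exception):
    """Stand-in for fb_tools.errors.ExpectedHandlerError."""


class DemoAbortExecution(DemoExpectedHandlerError):
    """Indicating an abort of the execution after a named step."""

    def __init__(self, step=None):
        self.step = step if step else '<some unknown step>'

    def __str__(self):
        return 'Aborting after {!r}.'.format(self.step)


class DemoHandler:
    """A toy handler that interrupts itself after a configured step."""

    def __init__(self, stop_at_step=None):
        self.stop_at_step = stop_at_step

    def __call__(self):
        for step in ('init', 'read-yaml', 'tf-files'):
            print('Performing step {!r} ...'.format(step))
            if self.stop_at_step == step:
                raise DemoAbortExecution(step)


if __name__ == '__main__':
    try:
        DemoHandler(stop_at_step='read-yaml')()
    except DemoExpectedHandlerError as e:
        print(e)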
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A handler module for underlying actions
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import os
-import logging
-import re
-import stat
-import copy
-
-from pathlib import Path
-
-from subprocess import PIPE
-
-from distutils.version import LooseVersion
-
-# Third party modules
-import pytz
-import six
-
-from fb_tools.common import pp, to_bool, to_str
-from fb_tools.errors import HandlerError, ExpectedHandlerError
-from fb_tools.handling_obj import HandlingObject, CalledProcessError
-from fb_tools.handler import BaseHandler
-
-# Own modules
-from .dns import CrTfHandlerDnsMixin
-from .files import CrTfHandlerFilesMixin
-from .first import CrTfHandlerFirstMixin
-from .read import CrTfHandlerReadMixin
-from .vmware import CrTfHandlerVmwMixin
-
-from .. import MIN_VERSION_TERRAFORM, MAX_VERSION_TERRAFORM
-from .. import MIN_VERSION_VSPHERE_PROVIDER
-
-from ..errors import AbortExecution
-
-# from ..tools import password_input
-
-from ..xlate import XLATOR
-
-__version__ = '3.10.0'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CreateTerraformHandler(
- BaseHandler, CrTfHandlerFirstMixin, CrTfHandlerReadMixin, CrTfHandlerDnsMixin,
- CrTfHandlerVmwMixin, CrTfHandlerFilesMixin):
- """
- A handler class for creating the terraform environment
- """
-
- re_default = re.compile(r'^\s*defaults?\s*$', re.IGNORECASE)
- re_vm_key = re.compile(r'^\s*vms?\s*$', re.IGNORECASE)
- re_group = re.compile(r'^\s*groups?\s*$', re.IGNORECASE)
- re_group_name = re.compile(r'^\s*name\s*$', re.IGNORECASE)
- re_doublequote = re.compile(r'"')
-
- re_tf_version = re.compile(r'^\s*Terraform\s+v(\S+)', re.IGNORECASE)
-
- std_file_permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
- std_secure_file_permissions = stat.S_IRUSR | stat.S_IWUSR
-
- sshkey_basename = 'id_rsa_cr_vmw_tpl'
-
- open_opts = {}
- if six.PY3:
- open_opts['encoding'] = 'utf-8'
- open_opts['errors'] = 'surrogateescape'
-
- max_groups_depth = 10
-
- tz_name = 'Europe/Berlin'
- tz = pytz.timezone(tz_name)
-
- steps = (
- 'init', 'vmw-init', 'read-yaml', 'pdns-zones', 'vmw-test', 'collect-folders',
- 'vmw-clusters', 'vmw-datastores', 'vmw-ds-clusters', 'vmw-networks', 'vmw-templates',
- 'validate-yaml', 'validate-storage', 'validate-iface', 'validate-dns',
- 'perform-dns', 'project-dir', 'tf-files', 'ensure-vmw-folders',
- )
- step_desc = {
- 'init': _('After initialization of all objects and handlers.'),
-        'vmw-init': _('After initialization of vSphere handlers.'),
- 'read-yaml': _('After reading the given YAML file.'),
- 'pdns-zones': _('After retrieving all DNS zones from PowerDNS.'),
-        'vmw-test': _('After testing vSphere handlers.'),
- 'collect-folders': _('After collecting all VMWare and local folders.'),
- 'vmw-clusters': _('After collecting all VMWare clusters.'),
- 'vmw-datastores': _('After collecting all VMWare datastores.'),
- 'vmw-ds-clusters': _('After collecting all VMWare datastore clusters.'),
- 'vmw-networks': _('After collecting all VMWare networks.'),
- 'vmw-templates': _('After validating all given VMWare templates.'),
- 'validate-yaml': _('After syntax validating of data from loaded YAML file.'),
- 'validate-storage': _('After validating all given storage data.'),
- 'validate-iface': _('After validating all given network interface data.'),
- 'validate-dns': _('After validating all given DNS data.'),
- 'perform-dns': _('After performing all necessary actions in DNS.'),
- 'project-dir': _('After ensuring availability of the project directory.'),
- 'tf-files': _('After creation of the Terraform project files.'),
- 'ensure-vmw-folders': _('After ensuring availability of VM folders in VMWare vSphere.'),
- }
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None,
- config=None, simulate=False, force=False, ignore_existing_dns=False,
- terminal_has_colors=False, initialized=False):
-
- self.pdns = None
- self.vsphere = {}
- self.config = None
-
- self.terraform_cmd = None
-
- self.yaml_data = None
-
- self.default_vm = None
- self.group_default_vms = {}
-
- self.ignore_existing_dns = bool(ignore_existing_dns)
-
- self.vms = []
- self.vsphere_templates = {}
-
- self.vm_names = []
- self.fqdns = {}
- self.addresses = {}
-
- self.vsphere_folders = []
-
- self.vsphere_user = None
- self.vsphere_password = None
-
- self.used_networks = {}
- self.used_dc_clusters = {}
- self.used_datastores = {}
- self.project_dir = None
- self.project_name = None
-
- self._terraform_root_dir = None
-
- self.all_vms = {}
- self.existing_vms = []
-
- self.start_dir = Path(os.getcwd())
-
- self.script_dir = None
- self.script_dir_rel = None
- self.keys_dir = None
- self.keys_dir_rel = None
- self.private_key = None
- self.private_key_rel = None
-
- self._stop_at_step = None
-
- self.min_version_terraform = None
- if MIN_VERSION_TERRAFORM:
- self.min_version_terraform = LooseVersion(MIN_VERSION_TERRAFORM)
-
- self.max_version_terraform = None
- if MAX_VERSION_TERRAFORM:
- self.max_version_terraform = LooseVersion(MAX_VERSION_TERRAFORM)
-
- self.min_version_vsphere_provider = None
- if MIN_VERSION_VSPHERE_PROVIDER:
- self.min_version_vsphere_provider = LooseVersion(MIN_VERSION_VSPHERE_PROVIDER)
-
- self.dns_mapping = {
- 'forward': [],
- 'reverse': [],
- }
- self.dns_mappings2create = {
- 'forward': [],
- 'reverse': [],
- }
-
- self.updated_zones = []
-
- self.eval_errors = 0
-
- super(CreateTerraformHandler, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
- initialized=False,
- )
-
- if config:
- self.config = config
- if self.verbose >= 1:
- msg = _("Given configuration:") + '\n' + pp(self.config.as_dict())
- LOG.debug(msg)
-
- self.script_dir = self.base_dir.joinpath('postinstall-scripts')
- LOG.debug(_("Directory for postinstall scripts: {!r}.").format(str(self.script_dir)))
- if not self.script_dir.exists():
-            msg = _("Directory for postinstall scripts {!r} does not exist.").format(
- str(self.script_dir))
- raise ExpectedHandlerError(msg)
- if not self.script_dir.is_dir():
- msg = _("Path {!r} for postinstall scripts exists, but is not a directory.").format(
- str(self.script_dir))
- raise ExpectedHandlerError(msg)
-
- self.keys_dir = self.base_dir.joinpath('keys')
- LOG.debug(_("Directory for SSH deploy keys: {!r}.").format(str(self.keys_dir)))
- if not self.keys_dir.exists():
-            msg = _("Directory for SSH deploy keys {!r} does not exist.").format(
- str(self.keys_dir))
- raise ExpectedHandlerError(msg)
- if not self.keys_dir.is_dir():
- msg = _("Path {!r} for SSH deploy keys exists, but is not a directory.").format(
- str(self.keys_dir))
- raise ExpectedHandlerError(msg)
-
- self.private_key = self.keys_dir / self.sshkey_basename
- LOG.debug(_("Filename of the private SSH deploy key: {!r}").format(str(self.private_key)))
- if not self.private_key.is_file():
- msg = _(
- "Private SSH deploy key file {!r} does not exists or is not a "
- "regular file.").format(str(self.private_key))
- raise ExpectedHandlerError(msg)
-
- if initialized:
- self.initialized = True
-
- # -----------------------------------------------------------
- @HandlingObject.simulate.setter
- def simulate(self, value):
- self._simulate = to_bool(value)
-
- if self.initialized:
- LOG.debug(_("Setting simulate of all subsequent objects to {!r} ...").format(
- self.simulate))
-
- if self.pdns:
- self.pdns.simulate = self.simulate
-
- for vsphere_name in self.vsphere.keys():
- if self.vsphere[vsphere_name]:
- self.vsphere[vsphere_name].simulate = self.simulate
-
- # -----------------------------------------------------------
- @property
- def stop_at_step(self):
- """Step, at which the execution should be interrupted."""
- return self._stop_at_step
-
- @stop_at_step.setter
- def stop_at_step(self, value):
- if value is None:
- self._stop_at_step = None
- return
- v = str(value).strip().lower().replace('_', '-')
- if v == '':
- self._stop_at_step = None
- return
- if v not in self.steps:
- msg = _("Invalid step name {!r} for interrupting execution.").format(value)
- raise ValueError(msg)
- self._stop_at_step = v
-
- # -----------------------------------------------------------
- @property
- def terraform_root_dir(self):
- """Root directory of all terraform directories."""
- if self.is_venv:
- return self.base_dir.parent
- return self._terraform_root_dir
-
- # -----------------------------------------------------------
- @property
- def full_project_name(self):
- """Complete project name with parent paths."""
- if not self.project_name:
- return None
- if not self.project_dir:
- return None
- if not self.terraform_root_dir:
- return self.project_name
- return os.path.relpath(str(self.project_dir), self.terraform_root_dir)
-
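- # Illustration with hypothetical paths: if terraform_root_dir were
- # '/srv/terraform' and project_dir were '/srv/terraform/customer-a/web',
- # the relative path computed by os.path.relpath() above would be
- # 'customer-a/web':
- #
- # >>> import os.path
- # >>> os.path.relpath('/srv/terraform/customer-a/web', '/srv/terraform')
- # 'customer-a/web'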
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transform the elements of the object into a dict.
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(CreateTerraformHandler, self).as_dict(short=short)
- res['std_file_permissions'] = "{:04o}".format(self.std_file_permissions)
- res['std_secure_file_permissions'] = "{:04o}".format(self.std_secure_file_permissions)
- res['open_opts'] = self.open_opts
- res['stop_at_step'] = self.stop_at_step
- res['steps'] = copy.copy(self.steps)
- res['tz_name'] = self.tz_name
- res['terraform_root_dir'] = self.terraform_root_dir
- res['full_project_name'] = self.full_project_name
- res['vsphere'] = {}
- for vsphere_name in self.vsphere.keys():
- res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(short=short)
-
- return res
-
- # -------------------------------------------------------------------------
- @classmethod
- def set_tz(cls, tz_name):
-
- if not tz_name.strip():
- raise ValueError(_("Invalid time zone name {!r}.").format(tz_name))
- tz_name = tz_name.strip()
- LOG.debug(_("Setting time zone to {!r}.").format(tz_name))
- cls.tz = pytz.timezone(tz_name)
- cls.tz_name = tz_name
-
- # -------------------------------------------------------------------------
- def __del__(self):
- """Destructor."""
-
- LOG.debug(_("Self destruction."))
-
- if self.pdns:
- self.pdns = None
-
- if self.vsphere:
- self.vsphere = None
-
- # -------------------------------------------------------------------------
- def __call__(self, yaml_file):
- """Executing the underlying action."""
-
- if not self.initialized:
- raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
-
- try:
-
- if self.simulate:
- print()
- msg_a = _("Simulation mode")
- msg_b = (
- "* " + _("Necessary DNS records are not created."),
- "* " + _("Terraform files are not created.")
- )
- ll = 4
- if len(msg_a) > ll:
- ll = len(msg_a)
- for msg in msg_b:
- if len(msg) > ll:
- ll = len(msg)
-
- print(self.colored('#' * (ll + 4), 'AQUA'))
- line = self.colored('#', 'AQUA') + ' '
- line += self.colored(msg_a.center(ll), 'YELLOW')
- line += ' ' + self.colored('#', 'AQUA')
- print(line)
- for msg in msg_b:
- line = '# ' + msg.ljust(ll) + ' #'
- print(self.colored(line, 'AQUA'))
- print(self.colored('#' * (ll + 4), 'AQUA'))
- print()
-
- self.exec_pdns_zones()
-
- print()
- LOG.info(_("Collecting first information from vSPhere."))
- self.test_vsphere_handlers()
- self.exec_collect_folders(yaml_file)
- self.assign_default_vmw_values()
-
- print()
- LOG.info(_("Retrieving information from vSphere."))
-
- self.exec_vmw_clusters()
- self.exec_vmw_datastores()
- self.exec_vmw_ds_clusters()
- self.exec_vmw_networks()
- self.exec_vmw_templates()
-
- self.exec_validate_yaml()
- self.exec_validate_storage()
- self.exec_validate_iface()
- self.exec_validate_dns()
-
- if self.verbose > 2:
-
- vm_list = []
- for vm in self.vms:
- vm_list.append(vm.as_dict())
- LOG.debug(_("Validated VMs:") + "\n" + pp(vm_list))
-
- if self.existing_vms:
- msg = ngettext(
- "There is one existing virtual machine.",
- "There are {c} existing virtual machines.",
- len(self.existing_vms)).format(c=len(self.existing_vms))
- LOG.warning(msg)
- if self.verbose > 2:
- msg = ngettext(
- "Existing virtual machine:", "Existing virtual machines:",
- len(self.existing_vms))
- LOG.debug(msg + '\n' + pp(self.existing_vms))
- else:
- LOG.info(_("No existing virtual machines found in YAML file."))
-
- self.exec_perform_dns()
- self.exec_project_dir()
-
- self.exec_tf_files()
- self.exec_vsphere_folders()
-
- LOG.info(_("Finished all steps."))
-
- except AbortExecution as e:
- LOG.warning(str(e))
- return
-
- self.exec_terraform()
- if self.simulate:
- print()
- print(self.colored(
- _('As I said before - it was only a simulation!'), 'AQUA'))
-
- print()
-
- # -------------------------------------------------------------------------
- def exec_terraform(self):
-
- tf_timeout = 30
-
- print()
- LOG.info(_("Executing {!r} ...").format('terraform init'))
- cmd = [str(self.terraform_cmd), 'init']
- try:
- result = self.run(
- cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
- except CalledProcessError as e:
- if e.stdout:
- print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
- if e.stderr:
- print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
- raise ExpectedHandlerError(str(e))
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
- if self.existing_vms:
- print()
- LOG.info(_("Importing existing virtual machines ..."))
-
- for vm in self.existing_vms:
-
- vs_name = vm.vsphere
- print()
- LOG.info(_("Importing VM {!r}.").format(vm.name))
- vm_obj = 'vsphere_virtual_machine.{}'.format(vm.tf_name)
- path = '/{dc}/{f}/{p}/{n}'.format(
- dc=self.vsphere[vs_name].dc, f=self.vsphere[vs_name].dc_obj.vm_folder,
- p=vm.path, n=vm.name)
- cmd = [str(self.terraform_cmd), 'import', vm_obj, path]
- try:
- result = self.run(
- cmd, may_simulate=True, timeout=tf_timeout,
- stdout=PIPE, stderr=PIPE, check=True)
- except CalledProcessError as e:
- if e.stdout:
- print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
- if e.stderr:
- msg = self.colored("Error message", ('BOLD', 'RED')) + ':\n'
- msg += to_str(e.stderr)
- print(msg)
- LOG.warn(_("Error on importing VM {!r}:").format(vm.name) + ' ' + str(e))
-
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
-# print()
-# LOG.info(_("Executing {!r} ...").format('terraform plan'))
-# cmd = [str(self.terraform_cmd), 'plan']
-# try:
-# result = self.run(
-# cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
-# except CalledProcessError as e:
-# if e.stdout:
-# print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
-# if e.stderr:
-# print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
-# raise ExpectedHandlerError(str(e))
-# LOG.debug(_("Completed process:") + "\n" + str(result))
-
- goto = Path(os.path.relpath(self.project_dir, self.start_dir))
-
- print()
- print()
- print(self.colored(_("Congratulations!"), 'GREEN'))
- print()
- print(_("Now you are ready to deploy the following virtual machines:"))
- for vm in sorted(self.vms, key=lambda x: x.tf_name):
- print(" * {}".format(vm.fqdn))
- print()
- print(_("To start the deployment process change to directory {}").format(
- self.colored(str(goto), 'GREEN')))
- print()
- print(_("and enter: {}").format(self.colored('terraform apply', 'GREEN')))
- print()
-
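- # Illustration of the 'terraform import' call above, with made-up values:
- # for an existing VM 'web01' in a vSphere with dc='DC01',
- # dc_obj.vm_folder='vm' and vm.path='customer-a', the constructed command
- # would look roughly like:
- #
- # terraform import vsphere_virtual_machine.web01 /DC01/vm/customer-a/web01
- #
- # i.e. the resource address plus the vSphere inventory path of the machine.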
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A mixin module for the handler module for dns related methods.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import ipaddress
-import logging
-import socket
-
-# Third party modules
-from fb_tools.common import RE_DOT_AT_END
-from fb_tools.errors import ExpectedHandlerError
-
-# Own modules
-
-from ..errors import AbortExecution
-
-from ..xlate import XLATOR
-
-__version__ = '0.1.1'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfHandlerDnsMixin():
- """A mixin module for the handler module for dns related methods."""
-
- # -------------------------------------------------------------------------
- def exec_pdns_zones(self):
-
- if self.config.no_pdns:
- return
-
- if self.stop_at_step == 'pdns-zones':
- self.incr_verbosity()
-
- print()
- LOG.info(_("Retrieving informations from PowerDNS ..."))
-
- self.pdns.get_api_zones()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring PowerDNS zones.",
- "Found {n} errors in exploring PowerDNS zones.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('pdns-zones'))
- if self.stop_at_step == 'pdns-zones':
- raise AbortExecution('pdns-zones')
-
- # -------------------------------------------------------------------------
- def exec_validate_dns(self):
-
- if self.stop_at_step == 'validate-dns':
- self.incr_verbosity()
-
- self.validate_dns_mappings()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating DNS mappings.",
- "Found {n} errors in validating DNS mappings.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-dns'))
- if self.stop_at_step == 'validate-dns':
- raise AbortExecution('validate-dns')
-
- # -------------------------------------------------------------------------
- def exec_perform_dns(self):
-
- if self.stop_at_step == 'perform-dns':
- self.incr_verbosity()
-
- self.perform_dns()
-
- LOG.info(_("Finished step {!r}.").format('perform-dns'))
- if self.stop_at_step == 'perform-dns':
- raise AbortExecution('perform-dns')
-
- # --------------------------------------------------------------------------
- def perform_dns(self):
-
- if self.config.no_pdns:
- LOG.debug(_("Power DNS actions are not executed."))
- return
-
- print()
- LOG.info(_("Performing DNS actions ..."))
- print()
-
- # TODO: Check for simulate and mappings to create
-
- errors = 0
-
- for (fqdn, address) in self.dns_mappings2create['forward']:
- if not self._perform_dns_forward(fqdn, address):
- errors += 1
-
- for (address, fqdn) in self.dns_mappings2create['reverse']:
- if not self._perform_dns_reverse(address, fqdn):
- errors += 1
-
- if errors:
- msg = ngettext(
- "There was one error in creating DNS mappings.",
- "There were {n} errors in creating DNS mappings.", errors).format(n=errors)
- raise ExpectedHandlerError(msg)
- else:
- if self.verbose > 1:
- LOG.debug(_("No errors in creating DNS mappings."))
-
- print()
-
- for zone_name in self.updated_zones:
- self._increase_zone_serial(zone_name)
-
- # --------------------------------------------------------------------------
- def _increase_zone_serial(self, zone_name):
-
- LOG.info(_("Increasing serial of zone {!r}.").format(zone_name))
-
- zone = self.pdns.zones[zone_name]
- zone.increase_serial()
- zone.notify()
-
- # --------------------------------------------------------------------------
- def _perform_dns_forward(self, fqdn, address):
-
- record_type = 'A'
- addr_obj = ipaddress.ip_address(address)
- if addr_obj.version == 6:
- record_type = 'AAAA'
-
- canon_fqdn = self.pdns.canon_name(fqdn)
-
- zone_name = self.pdns.get_zone_for_item(canon_fqdn, is_fqdn=True)
- if zone_name:
- if self.verbose > 1:
- LOG.debug(_("Got zone {z!r} for FQDN {f!r}.").format(
- z=zone_name, f=canon_fqdn))
- else:
- LOG.error(_("Did not found zone to insert {t}-record for {f!r}.").format(
- t=record_type, f=fqdn))
- return False
-
- zone = self.pdns.zones[zone_name]
- if addr_obj.is_private:
- zone.add_address_record(
- fqdn, address, set_ptr=False, comment='local',
- account=self.config.pdns_comment_account, append_comments=True)
- else:
- zone.add_address_record(fqdn, address, set_ptr=False)
- if zone_name not in self.updated_zones:
- self.updated_zones.append(zone_name)
- return True
-
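- # Illustration of the record type selection above, using addresses from
- # the documentation ranges:
- #
- # >>> import ipaddress
- # >>> ipaddress.ip_address('192.0.2.10').version
- # 4
- # >>> ipaddress.ip_address('2001:db8::10').version
- # 6
- #
- # An IPv4 address therefore gets an A record, an IPv6 address an AAAA record.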
- # --------------------------------------------------------------------------
- def _perform_dns_reverse(self, address, fqdn):
-
- LOG.debug(_("Trying to create PTR-record {a!r} => {f!r}.").format(
- f=fqdn, a=str(address)))
-
- pointer = self.pdns.canon_name(address.reverse_pointer)
- if self.verbose > 1:
- LOG.debug(_("PTR of {a!r}: {p!r}.").format(a=str(address), p=pointer))
-
- zone_name = self.pdns.get_zone_for_item(pointer, is_fqdn=True)
- if zone_name:
- if self.verbose > 1:
- LOG.debug(_("Got reverse zone {z!r} for address {a!r}.").format(
- z=zone_name, a=str(address)))
- else:
- LOG.warn(_("Did not found zone to insert PTR-record {p!r} ({a}).").format(
- p=pointer, a=str(address)))
- return True
-
- zone = self.pdns.zones[zone_name]
- zone.add_ptr_record(pointer, fqdn)
- if zone_name not in self.updated_zones:
- self.updated_zones.append(zone_name)
- return True
-
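- # Illustration of the reverse pointer used above, with an address from the
- # documentation range:
- #
- # >>> import ipaddress
- # >>> ipaddress.ip_address('192.0.2.10').reverse_pointer
- # '10.2.0.192.in-addr.arpa'
- #
- # The canonicalized pointer is then used to find the matching reverse zone.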
- # -------------------------------------------------------------------------
- def validate_dns_mappings(self):
-
- LOG.info(_("Validating DNS mappings ..."))
- self._validate_forward_dns_mappings()
- self._validate_reverse_dns_mappings()
-
- lines = []
- if self.dns_mappings2create['forward']:
- for pair in self.dns_mappings2create['forward']:
- line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
- lines.append(line)
- else:
- lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
- LOG.info(_("Forward DNS entries to create:") + "\n" + '\n'.join(lines))
-
- lines = []
- if self.dns_mappings2create['reverse']:
- for pair in self.dns_mappings2create['reverse']:
- line = ' * {r} ({a!r}) => {n!r}'.format(
- r=pair[0].reverse_pointer, n=pair[1], a=str(pair[0]))
- lines.append(line)
- else:
- lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
- LOG.info(_("Reverse DNS entries to create:") + "\n" + '\n'.join(lines))
-
- # -------------------------------------------------------------------------
- def _validate_forward_dns_mappings(self):
-
- if not self.dns_mapping['forward']:
- return
-
- LOG.debug(_("Validating forward DNS mappings ..."))
-
- for (fqdn, address) in self.dns_mapping['forward']:
-
- if self.verbose > 1:
- LOG.debug(_("Validating {f!r} => {a!r}.").format(f=fqdn, a=str(address)))
-
- results_v4 = []
- results_v6 = []
-
- try:
- addr_infos = socket.getaddrinfo(fqdn, 80)
- except socket.gaierror:
- addr_infos = []
-
- for addr_info in addr_infos:
- if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
- continue
- addr = ipaddress.ip_address(addr_info[4][0])
- if addr.version == 4:
- if addr not in results_v4:
- results_v4.append(addr)
- else:
- if addr not in results_v6:
- results_v6.append(addr)
- if self.verbose > 2:
- if results_v4 or results_v6:
- lines = []
- for addr in results_v4 + results_v6:
- lines.append(' * {}'.format(str(addr)))
- out = '\n'.join(lines)
- LOG.debug(_("Found existing addresses for {f!r}:").format(f=fqdn) + '\n' + out)
- else:
- LOG.debug(_("Did not found existing addresses for {!r}.").format(fqdn))
-
- if address.version == 4:
- if not results_v4:
- self.dns_mappings2create['forward'].append((fqdn, address))
- continue
- if address in results_v4:
- LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
- f=fqdn, a=str(address)))
- continue
- else:
- if not results_v6:
- self.dns_mappings2create['forward'].append((fqdn, address))
- continue
- if address in results_v6:
- LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
- f=fqdn, a=str(address)))
- continue
-
- alist = '\n'.join(map(lambda x: ' * {}'.format(str(x)), results_v4 + results_v6))
- msg = (_(
- "FQDN {f!r} has already existing addresses, "
- "but none of them are {a!r}:").format(f=fqdn, a=str(address)) + "\n" + alist)
- if self.ignore_existing_dns:
- LOG.warning(msg)
- self.dns_mappings2create['forward'].append((fqdn, address))
- else:
- LOG.error(msg)
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------
- def _validate_reverse_dns_mappings(self):
-
- if not self.dns_mapping['reverse']:
- return
-
- LOG.debug(_("Validating reverse DNS mappings ..."))
-
- for (address, fqdn) in self.dns_mapping['reverse']:
-
- if self.verbose > 1:
- LOG.debug(_("Validating {a!r} => {f!r}.").format(f=fqdn, a=str(address)))
-
- try:
- info = socket.gethostbyaddr(str(address))
- except socket.herror:
- info = []
- if self.verbose > 2:
- LOG.debug(_("Got reverse info:") + "\n" + str(info))
- ptr = None
- if info:
- ptr = info[0]
-
- if not ptr:
- if self.verbose > 1:
- LOG.debug(_("Did not found reverse pointer for {!r}.").format(str(address)))
- self.dns_mappings2create['reverse'].append((address, fqdn))
- continue
-
- ptr = RE_DOT_AT_END.sub('', ptr).lower()
- fqdn_canon = RE_DOT_AT_END.sub('', fqdn).lower()
-
- if self.verbose > 1:
- LOG.debug(_("Found reverse pointer {a!r} => {f!r}.").format(f=ptr, a=str(address)))
- if fqdn_canon == ptr:
- if self.verbose > 1:
- LOG.debug(_("Reverse pointer for {!r} was already existing.").format(
- str(address)))
- continue
-
- LOG.error(_("Address {a!r} has already an existing reverse pointer to {p!r}.").format(
- a=str(address), p=ptr))
- self.eval_errors += 1
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A mixin module for the handler for methods for creating terraform project files.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import logging
-import os
-import shutil
-import stat
-import textwrap
-
-from pathlib import Path
-
-# Third party modules
-
-from fb_tools.common import pp
-from fb_tools.errors import ExpectedHandlerError
-
-# Own modules
-from ..errors import AbortExecution
-
-
-from ..xlate import XLATOR
-
-__version__ = '0.5.3'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfHandlerFilesMixin():
- """A mixin module for the handler module for reading and evaluation."""
-
- # -------------------------------------------------------------------------
- def exec_project_dir(self):
-
- if self.stop_at_step == 'project-dir':
- self.incr_verbosity()
-
- self.ensure_project_dir()
- self.clean_project_dir()
-
- LOG.info(_("Finished step {!r}.").format('project-dir'))
- if self.stop_at_step == 'project-dir':
- raise AbortExecution('project-dir')
-
- # -------------------------------------------------------------------------
- def exec_tf_files(self):
-
- if self.stop_at_step == 'tf-files':
- self.incr_verbosity()
-
- self.create_terraform_files()
-
- LOG.info(_("Finished step {!r}.").format('tf-files'))
- if self.stop_at_step == 'tf-files':
- raise AbortExecution('tf-files')
-
- # -------------------------------------------------------------------------
- def get_tf_name_network(self, net_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if net_name in self.vsphere.network_mapping:
- return self.vsphere.network_mapping[net_name]
- if has_default:
- return default
- raise KeyError(_("Did not found network {!r}.").format(net_name))
-
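- # Usage sketch with a hypothetical mapping: the optional second argument
- # acts like the default value of dict.get(). Assuming 'handler' is an
- # initialized handler object whose network mapping contains
- # {'VLAN_123': 'net_vlan_123'}:
- #
- # >>> handler.get_tf_name_network('VLAN_123')
- # 'net_vlan_123'
- # >>> handler.get_tf_name_network('does-not-exist', 'fallback')
- # 'fallback'
- # >>> handler.get_tf_name_network('does-not-exist')  # raises KeyError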
- # --------------------------------------------------------------------------
- def get_tf_name_ds_cluster(self, dsc_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if dsc_name in self.vsphere.ds_cluster_mapping:
- return self.vsphere.ds_cluster_mapping[dsc_name]
- if has_default:
- return default
- raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
-
- # --------------------------------------------------------------------------
- def get_tf_name_datastore(self, ds_name, *args):
-
- default = None
- has_default = False
- if len(args):
- if len(args) > 1:
- msg = ngettext(
- "Method {c}.{m} expected at most one argument, got {n}.",
- "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
- c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
- raise TypeError(msg)
- default = args[0]
- has_default = True
-
- if ds_name in self.vsphere.ds_mapping:
- return self.vsphere.ds_mapping[ds_name]
- if has_default:
- return default
- raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
-
- # --------------------------------------------------------------------------
- def ensure_project_dir(self):
-
- print()
- LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
-
- if self.project_dir.exists():
- if self.project_dir.is_dir():
- LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
- else:
- msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
- else:
- LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
- if self.simulate:
- LOG.debug(_("Simulation mode - directory will not be created."))
- else:
- try:
- os.makedirs(str(self.project_dir), mode=0o755)
- except PermissionError as e:
- msg = _("Could not create directory {d!r}: {e}").format(
- d=str(self.project_dir), e=e)
- raise ExpectedHandlerError(msg)
-
- if not self.project_dir.exists():
- if self.simulate:
- return
- else:
- msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
-
- if not os.access(str(self.project_dir), os.W_OK):
- msg = _("No write access to directory {!r}.").format(str(self.project_dir))
- raise ExpectedHandlerError(msg)
-
- LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
- os.chdir(str(self.project_dir))
-
- self.script_dir_rel = Path(os.path.relpath(
- str(self.script_dir), str(self.project_dir)))
- LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
-
- filemode = stat.S_IMODE(self.private_key.stat().st_mode)
- LOG.debug(_("Permissions of {k!r} are {m:04o}.").format(
- k=str(self.private_key), m=filemode))
- if filemode not in [0o400, 0o600]:
- LOG.info(_("Setting permissions of {k!r} from {o:04o} to {m:04o}.").format(
- k=str(self.private_key), o=filemode, m=0o600))
- self.private_key.chmod(0o600)
-
- self.keys_dir_rel = Path(os.path.relpath(
- str(self.keys_dir), str(self.project_dir)))
- LOG.debug(_("Directory for SSH deploy keys relative to project dir: {!r}.").format(
- str(self.keys_dir_rel)))
-
- self.private_key_rel = self.keys_dir_rel / self.sshkey_basename
- LOG.debug(_(
- "Filename of the private SSH deploy key relative to project "
- "dir: {!r}").format(str(self.private_key_rel)))
-
- if self.verbose > 1:
- LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
- str(self.project_dir)))
-
- tf_path = self.project_dir / '.terraform'
- if tf_path.exists() and not tf_path.is_dir():
- msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
- d=str(self.project_dir), w='.terraform')
- raise ExpectedHandlerError(msg)
-
- state_path = self.project_dir / 'terraform.tfstate'
- if state_path.exists() and not state_path.is_file():
- msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
- d=str(self.project_dir), w='terraform.tfstate')
- raise ExpectedHandlerError(msg)
-
- if tf_path.is_dir() and state_path.is_file():
- msg = _(
- "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
- "Is this an old terraform project?").format(
- d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
- raise ExpectedHandlerError(msg)
-
- # --------------------------------------------------------------------------
- def clean_project_dir(self):
-
- print()
- LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
-
- files = []
- for path in self.project_dir.glob('*'):
- files.append(path)
- for path in self.project_dir.glob('.terraform'):
- files.append(path)
-
- if not files:
- LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
- return
- for pfile in files:
- if pfile.exists():
- if pfile.is_dir():
- LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
- if not self.simulate:
- shutil.rmtree(str(pfile))
- else:
- LOG.debug(_("Removing {!r} ...").format(str(pfile)))
- if not self.simulate:
- pfile.unlink()
-
- # --------------------------------------------------------------------------
- def create_terraform_files(self):
-
- print()
- print()
- msg = _("Creating all necessary files for terraform.")
- ll = 6
- if len(msg) > ll:
- ll = len(msg)
- print(self.colored('#' * (ll + 6), 'AQUA'))
- line = self.colored('#', 'AQUA') + ' '
- line += self.colored(msg.center(ll), 'YELLOW')
- line += ' ' + self.colored('#', 'AQUA')
- print(line)
- print(self.colored('#' * (ll + 6), 'AQUA'))
- print()
- print()
-
- self.create_varfiles()
- self.create_dcfile()
- self.create_backend_file()
- self.create_instance_files()
-
- # --------------------------------------------------------------------------
- def create_varfiles(self):
-
- LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
-
- vs_name = next(iter(self.vsphere.keys()), None)
- if self.verbose > 1:
- LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
- w='dcfile', v=vs_name))
-
- vs_host = self.config.vsphere[vs_name].host
- vs_user = self.config.vsphere[vs_name].user
- vs_pwd = self.config.vsphere[vs_name].password
- vs_dc = self.config.vsphere[vs_name].dc
-
- rhsm_user = self.config.rhsm_user
- rhsm_password = self.config.rhsm_password
-
- content = textwrap.dedent('''\
- ## filename: terraform.tfvars
- ## This file declares the values for the variables to be used in the instance.*.tf files
-
- #
- # ATTENTION!
- #
- # To avoid annoying questions for password and API key
- # manually create a file 'terraform-private.auto.tfvars'
- # with the following content:
- #
- # vsphere_username = "<USERNAME>"
- # vsphere_userpassword = "<PASSWORD>"
- # rhsm_user_password = "<PASSWORD>"
- #
- # with the correct values. This file will not be under GIT control
- #
-
- ''')
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('terraform.tfvars', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('terraform.tfvars', self.std_file_permissions)
-
- # Sensitive data
- if vs_user or vs_pwd:
- content = '# Private sensitive information. Please keep this file secret.\n\n'
- if vs_user:
- content += 'vsphere_username = "{}"\n'.format(vs_user)
- if vs_pwd:
- content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
- if rhsm_user:
- content += 'rhsm_user_name = "{}"\n'.format(rhsm_user)
- if rhsm_password:
- content += 'rhsm_user_password = "{}"\n'.format(rhsm_password)
- content += '\n'
-
- LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
-
- # File with variable declarations
- content = textwrap.dedent('''\
- # filename: variables.tf
- # declaration of the variables to be used in the project
- # their values are assigned in terraform.tfvars and private.auto.tfvars
-
- ''')
-
- tpl = textwrap.dedent('''\
- variable "vsphere_vcenter" {{
- default = "{}"
- description = "IP or DNS of the vSphere center."
- type = string
- }}
-
- ''')
- content += tpl.format(vs_host)
-
- tpl = textwrap.dedent('''\
- variable "vsphere_username" {
- description = "vSphere accountname to be used."
- type = string
- }
-
- variable "vsphere_userpassword" {
- description = "Password for vSphere accountname."
- type = string
- }
-
- ''')
- content += tpl
-
- tpl = textwrap.dedent('''\
- variable "vsphere_datacenter" {{
- default = "{dc}"
- description = "Name of the vSphere datacenter to use."
- type = string
- }}
-
- ''')
- content += tpl.format(dc=vs_dc)
-
- tpl = textwrap.dedent('''\
- variable "rhsm_user_name" {{
- default = "{rhsm_user}"
- description = "Username of the RedHat subscription management user."
- type = string
- }}
-
- variable "rhsm_user_password" {{
- description = "Password of the RedHat subscription management user."
- type = string
- }}
-
- ''')
- content += tpl.format(rhsm_user=self.config.default_rhsm_user)
-
- tpl = textwrap.dedent('''\
- variable "timezone" {{
- default = "{tz}"
- description = "The global timezone used for VMs"
- type = string
- }}
-
- ''')
- content += tpl.format(tz=self.tz_name)
-
- LOG.debug(_("Creating {!r} ...").format('variables.tf'))
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('variables.tf', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('variables.tf', self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def create_dcfile(self):
-
- vs_name = next(iter(self.vsphere.keys()), None)
- vsphere = self.vsphere[vs_name]
-
- LOG.debug(_("Creating {!r} ...").format('dc.tf'))
- if self.verbose > 1:
- LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
- w='dcfile', v=vs_name))
-
- content = textwrap.dedent('''\
- # filename: dc.tf
- # Configuring the VMware vSphere provider and some dependent, commonly used objects
-
- provider "vsphere" {
- vsphere_server = var.vsphere_vcenter
- user = var.vsphere_username
- password = var.vsphere_userpassword
- allow_unverified_ssl = true
- ''')
-
-# if self.min_version_vsphere_provider:
-# content += ' version = ">= {}"\n'.format(
-# str(self.min_version_vsphere_provider))
-
- content += textwrap.dedent('''\
- }
-
- data "vsphere_datacenter" "dc" {
- name = var.vsphere_datacenter
- }
-
- ''')
-
- for cluster in vsphere.clusters:
- tpl = textwrap.dedent('''\
- data "vsphere_resource_pool" "{pv}" {{
- name = "{pn}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(
- pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
-
- if self.used_dc_clusters:
- for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
- dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
- tpl = textwrap.dedent('''\
- data "vsphere_datastore_cluster" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=dsc_tf_name, n=dsc_name)
-
- if self.used_datastores:
- for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
- ds_tf_name = vsphere.ds_mapping[ds_name]
- tpl = textwrap.dedent('''\
- data "vsphere_datastore" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=ds_tf_name, n=ds_name)
-
- for net_name in sorted(self.used_networks[vs_name], key=str.lower):
- net_tf_name = vsphere.network_mapping[net_name]
- tpl = textwrap.dedent('''\
- data "vsphere_network" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(n=net_name, tn=net_tf_name)
-
- if self.vsphere_templates:
- for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
- tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
- tpl = textwrap.dedent('''\
- data "vsphere_virtual_machine" "{tn}" {{
- name = "{n}"
- datacenter_id = data.vsphere_datacenter.dc.id
- }}
-
- ''')
- content += tpl.format(tn=tpl_tf_name, n=tname)
-
- tpl = textwrap.dedent('''\
- data "vsphere_tag_category" "{cid}" {{
- name = "{cname}"
- }}
-
- data "vsphere_tag" "{tid}" {{
- name = "{tname}"
- category_id = data.vsphere_tag_category.{cid}.id
- }}
-
- ''').format(
- cid=self.config.vsphere_tag_cat_os_id,
- cname=self.config.vsphere_tag_cat_os_name,
- cdesc=self.config.vsphere_tag_cat_os_desc,
- tid=self.config.vsphere_tag_os_rhel_id,
- tname=self.config.vsphere_tag_os_rhel_name,
- tdesc=self.config.vsphere_tag_os_rhel_desc,
- )
- content += tpl
-
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open('dc.tf', 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod('dc.tf', self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def create_backend_file(self):
-
- file_name = 'backend.tf'
- LOG.debug(_("Creating {!r} ...").format(file_name))
-
- tpl = textwrap.dedent('''\
- # Configuration of the backend for storing the Terraform state
- # and the minimum required version of Terraform
-
- terraform {{
- backend "consul" {{
- address = "{host}"
- scheme = "{scheme}"
- path = "{prefix}/{project}"
- }}
- ''')
-
- project = self.full_project_name
- if not project:
- project = self.project_name
-
- content = tpl.format(
- host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
- prefix=self.config.tf_backend_path_prefix, project=project)
-
- if self.min_version_terraform:
- content += ' required_version = ">= {}"\n'.format(str(self.min_version_terraform))
- else:
- LOG.warn(_("No minimum version of Terraform defined."))
-
- content += '}\n\n'
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open(file_name, 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod(file_name, self.std_file_permissions)
-
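- # Sketch of a rendered backend.tf, assuming made-up configuration values
- # (tf_backend_host='consul.example.com', tf_backend_scheme='https',
- # tf_backend_path_prefix='terraform', full project name 'customer-a/web')
- # and a minimum Terraform version of e.g. 1.6.5:
- #
- # terraform {
- #   backend "consul" {
- #     address = "consul.example.com"
- #     scheme = "https"
- #     path = "terraform/customer-a/web"
- #   }
- #   required_version = ">= 1.6.5"
- # }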
- # --------------------------------------------------------------------------
- def create_instance_files(self):
-
- LOG.debug(_("Creating terraform files for VM instances."))
-
- for vm in sorted(self.vms, key=lambda x: x.tf_name):
- self.create_instance_file(vm)
-
- # --------------------------------------------------------------------------
- def create_instance_file(self, vm):
-
- vs_name = vm.vsphere
-
- fname = 'instance.' + vm.name + '.tf'
- LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
- f=fname, n=vm.name))
-
- guest_id = self.config.guest_id
- tpl_vm = None
- if vm.vm_template:
- tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
- if self.verbose > 3:
- LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
- guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
- else:
- guest_id = '"' + guest_id + '"'
-
- content = self._create_instfile_general(vm, guest_id, tpl_vm)
-
- i = 0
- for iface in vm.interfaces:
- content += self._create_instfile_if(vm, iface, i, tpl_vm)
- i += 1
-
- for disk_name in sorted(vm.disks.keys()):
- content += self._create_instfile_disk(vm, disk_name)
-
- content += self._create_instfile_custom(vm, tpl_vm)
-
- if self.verbose > 1:
- LOG.debug(_("Writing {!r}").format(fname))
-
- if self.simulate:
- if self.verbose:
- print(content)
- else:
- with open(fname, 'w', **self.open_opts) as fh:
- fh.write(content)
- os.chmod(fname, self.std_file_permissions)
-
- # --------------------------------------------------------------------------
- def _create_instfile_general(self, vm, guest_id, tpl_vm):
-
- vs_name = vm.vsphere
-
- # ## General definitions of VM
- if self.verbose > 1:
- LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
- content = textwrap.dedent('''\
- # Definition of the VM instance {!r}.
-
- ''').format(vm.name)
-
- cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
- if not cluster:
- msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
- vm.cluster)
- raise RuntimeError(msg)
-
- content += textwrap.dedent('''\
- resource "vsphere_virtual_machine" "{tn}" {{
-
- resource_pool_id = data.vsphere_resource_pool.{pv}.id
- name = "{n}"
- ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
-
- if vm.ds_cluster:
- dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
- tpl = ' datastore_cluster_id = data.vsphere_datastore_cluster.{}.id\n'
- content += tpl.format(dsc_tf_name)
-
- if vm.datastore:
- ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
- tpl = ' datastore_id = data.vsphere_datastore.{}.id\n'
- content += tpl.format(ds_tf_name)
-
- content += textwrap.indent(textwrap.dedent('''\
- num_cpus = "{cpu}"
- folder = "{f}"
- num_cores_per_socket = "1"
- cpu_hot_add_enabled = "true"
- cpu_hot_remove_enabled = "true"
- memory = "{m}"
- memory_hot_add_enabled = "true"
- boot_delay = "{b}"
- guest_id = {g}
- scsi_controller_count = "{c_count}"
- '''), ' ').format(
- g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000),
- c_count=vm.disks.get_ctrlr_count())
- if vm.vm_template:
- tpl = ' scsi_type = data.vsphere_virtual_machine.{}.scsi_type\n'
- content += tpl.format(tpl_vm.tf_name)
- content += ' enable_disk_uuid = "true"\n\n'
-
- if vm.is_rhel:
- content += ' tags = [\n data.vsphere_tag.{}.id\n ]\n\n'.format(
- self.config.vsphere_tag_os_rhel_id)
-
- content += textwrap.indent(textwrap.dedent('''\
- lifecycle {
- ignore_changes = all
- }
- '''), ' ')
- content += '\n'
-
- return content
-
- # --------------------------------------------------------------------------
- def _create_instfile_if(self, vm, iface, i, tpl_vm):
-
- vs_name = vm.vsphere
-
- # ## Interface definition
-
- if self.verbose > 1:
- LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
- nw = iface.network
- nw_name = self.vsphere[vs_name].network_mapping[nw]
-
- content = textwrap.indent(textwrap.dedent('''\
- network_interface {{
- network_id = data.vsphere_network.{n}.id
- adapter_type = data.{vvm}.{t}.{nit}[0]
- }}
- '''), ' ').format(
- n=nw_name, t=tpl_vm.tf_name,
- vvm='vsphere_virtual_machine', nit='network_interface_types')
- content += '\n'
-
- return content
-
- # --------------------------------------------------------------------------
- def _create_instfile_disk(self, vm, disk_name):
-
- # ## Disk definitions
- if self.verbose > 1:
- LOG.debug(_("Generating disk definition {n} of {v!r}.").format(n=disk_name, v=vm.name))
- disk = vm.disks[disk_name]
- content = textwrap.indent(textwrap.dedent('''\
- disk {{
- label = "{n}"
- size = "{s}"
- eagerly_scrub = "false"
- thin_provisioned = "false"
- unit_number = {i}
- '''), ' ').format(n=disk_name, i=disk.unit_number, s=int(disk.size_gb))
-
- content += ' }\n\n'
-
- return content
-
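- # Sketch of one rendered disk block, assuming a hypothetical disk labelled
- # 'data' with unit_number=1 and a size of 50 GB:
- #
- # disk {
- #   label = "data"
- #   size = "50"
- #   eagerly_scrub = "false"
- #   thin_provisioned = "false"
- #   unit_number = 1
- # }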
- # --------------------------------------------------------------------------
- def _create_instfile_custom(self, vm, tpl_vm):
-
- # ## Customization of VM
- if self.verbose > 1:
- LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
-
- content = textwrap.indent(textwrap.dedent('''\
- clone {{
- template_uuid = data.vsphere_virtual_machine.{t}.id
-
- customize {{
- linux_options {{
- host_name = "{h}"
- domain = "{d}"
- time_zone = var.timezone
- }}
-
- '''), ' ').format(
- t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
-
- content += self._create_instfile_nw(vm)
- content += ' }\n'
- content += ' }\n\n'
-
- # ## local SSH cleanup before any actions
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "ssh-keygen -R {h} || true"
- }}
-
- provisioner "local-exec" {{
- command = "ssh-keygen -R {i} || true"
- }}
-
- '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
- # ## Copying postinstall scripts to VM
-
- files = ['functions.rc', 'conf-resolver', 'create-motd']
- if vm.is_rhel:
- files.append('register-rhel')
- files.append('update-networkmanager')
- if vm.has_puppet:
- files.append('init-puppet')
- files.append('update-all-packages')
-
- for sname in files:
-
- if self.verbose > 1:
- LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
- f=sname, v=vm.name))
-
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "file" {{
- source = "{d}/{f}"
- destination = "/tmp/{f}"
- connection {{
- type = "ssh"
- host = "{h}"
- user = "root"
- private_key = file("{k}")
- agent = "false"
- }}
- }}
-
- '''), ' ').format(
- d=self.script_dir_rel, f=sname, h=vm.fqdn, k=self.private_key_rel)
-
- if vm.is_rhel:
- if self.verbose > 1:
- LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
- f='rhsm-user-passwd', v=vm.name))
-
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "file" {{
- destination = "/tmp/rhsm-user-passwd"
- content = "${{var.rhsm_user_password}}"
- connection {{
- type = "ssh"
- host = "{h}"
- user = "root"
- private_key = file("{k}")
- agent = "false"
- }}
- }}
-
- '''), ' ').format(h=vm.fqdn, k=self.private_key_rel)
-
- # ## Postinstall commands on host
- commands = []
-
- commands.append("usermod -c 'root {}' root".format(vm.fqdn))
-
- commands.append("chmod +x /tmp/conf-resolver")
- cmd = '/tmp/conf-resolver'
- for ns in vm.nameservers:
- cmd += ' --ns {!r}'.format(str(ns))
- for dom in vm.searchdomains:
- cmd += ' --search {!r}'.format(dom)
- if vm.dns_options:
- cmd += ' --options {!r}'.format(vm.dns_options)
- else:
- cmd += ' --options {!r}'.format('')
- commands.append(cmd)
- commands.append("rm -fv /tmp/conf-resolver")
-
- purpose = self.re_doublequote.sub('\\\"', vm.purpose)
-
- zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
-
- commands.append("chmod +x /tmp/create-motd")
- cmd = (
- "/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
- "--location 'VMWare' --zone '{z}' --customer '{c}' --email '{m}' --tier '{t}' "
- "--environment '{e}' --role '{r}'").format(
- p=purpose, t=vm.puppet_tier, o=vm.customer, z=zone, c=vm.puppet_customer,
- m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
- if vm.puppet_project:
- cmd += " --project '{pr}'".format(pr=vm.puppet_project)
- cmd += " | tee /etc/motd"
- commands.append(cmd)
- commands.append("rm -fv /tmp/create-motd")
-
- # ## Registering RHEL with RedHat Subscription Management
- if vm.is_rhel:
- commands.append("chmod +x /tmp/register-rhel")
- commands.append("/tmp/register-rhel -v -U '${var.rhsm_user_name}'")
- commands.append("rm -fv /tmp/rhsm-user-passwd /tmp/register-rhel")
-
- # ## Configuring and starting puppet
- if vm.has_puppet:
- commands.append("chmod +x /tmp/init-puppet")
- cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
- if vm.puppet_project:
- cmd += "--project '{pr}' "
- cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
- cmd += " --zone '{z}'"
- # if vm.puppet_initial_install:
- # cmd += " --initial-install"
- cmd = cmd.format(
- p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
- pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
- commands.append(cmd)
- commands.append("rm -fv /tmp/init-puppet")
-
- content += ' provisioner "remote-exec" {\n'
- content += ' inline = [\n'
- for cmd in commands:
- content += ' "{}",\n'.format(cmd)
- content += ' ]\n'
- content += ' connection {\n'
- content += ' type = "ssh"\n'
- content += ' host = "{}"\n'.format(vm.fqdn)
- content += ' user = "root"\n'
- content += ' private_key = file("{}")\n'.format(self.private_key_rel)
- content += ' agent = "false"\n'
- content += ' }\n'
- content += ' }\n\n'
-
- # ## postconfigure actions with puppet
- if vm.has_puppet:
- content += self._create_instfile_puppet(vm)
- else:
- content += self._only_update_packages(vm)
-
- # ## Unregistering from RedHat Subscription Management
- cmd = ('if [ -x /sbin/subscription-manager ] ; then '
- '/sbin/subscription-manager unregister --no-progress-messages; fi || true')
- if vm.is_rhel:
- content += ' provisioner "remote-exec" {\n'
- content += ' inline = [\n'
- content += ' "{}"\n'.format(cmd)
- content += ' ]\n'
- content += ' when = destroy\n'
- content += ' connection {\n'
- content += ' type = "ssh"\n'
- content += ' host = "{}"\n'.format(vm.fqdn)
- content += ' user = "root"\n'
- content += ' }\n'
- content += ' }\n\n'
-
- # ## local SSH cleanup on destroy
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "ssh-keygen -R {h} || true"
- when = destroy
- }}
-
- provisioner "local-exec" {{
- command = "ssh-keygen -R {i} || true"
- when = destroy
- }}
- '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
-
- content += '}\n\n'
-
- return content
-
- # -------------------------------------------------------------------------
- def _create_instfile_nw(self, vm):
-
- content = ''
-
- gw4 = None
- gw6 = None
- for iface in vm.interfaces:
-
- content += " network_interface {\n"
- if iface.address_v4:
- content += ' ipv4_address = "{}"\n'.format(iface.address_v4)
- if iface.netmask_v4 is not None:
- content += ' ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
- if iface.address_v6:
- content += ' ipv6_address = "{}"\n'.format(iface.address_v6)
- if iface.netmask_v6 is not None:
- content += ' ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
- content += ' }\n\n'
-
- if not gw4:
- gw4 = iface.gateway_v4
- if not gw6:
- gw6 = iface.gateway_v6
-
- if gw4:
- content += ' ipv4_gateway = "{}"\n'.format(gw4)
- if gw6:
- content += ' ipv6_gateway = "{}"\n'.format(gw6)
-
- ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
- content += ' dns_server_list = [{}]\n'.format(ns)
-
- return content
-
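- # Sketch of the customization block rendered above for a single interface,
- # assuming hypothetical values (address 192.0.2.10, netmask given as prefix
- # length 24, gateway 192.0.2.1, one nameserver 192.0.2.53):
- #
- # network_interface {
- #   ipv4_address = "192.0.2.10"
- #   ipv4_netmask = "24"
- # }
- #
- # ipv4_gateway = "192.0.2.1"
- # dns_server_list = ["192.0.2.53"]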
- # -------------------------------------------------------------------------
- def _create_instfile_puppet(self, vm):
-
- ca_cmd = (
- "ssh -o StrictHostKeyChecking=no {ca} "
- "'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'").format(
- ca=self.config.puppetca, h=vm.fqdn)
-
- command_list = [
- "/opt/puppetlabs/bin/puppet agent --test || true",
- "/usr/bin/systemctl start puppet.service",
- "/usr/bin/systemctl enable puppet.service",
- "chmod +x /tmp/update-all-packages",
- "/tmp/update-all-packages",
- "rm -fv /tmp/update-all-packages",
- ]
- if vm.is_rhel:
- command_list.append("chmod +x /tmp/update-networkmanager")
- command_list.append("/tmp/update-networkmanager -v")
- command_list.append("rm -fv /tmp/update-networkmanager")
- command_list.append("rm -fv /tmp/functions.rc")
- commands = ',\n '.join('"' + x + '"' for x in command_list)
-
- content = textwrap.indent(textwrap.dedent('''\
- provisioner "local-exec" {{
- command = "{ca_cmd}"
- }}
-
- provisioner "remote-exec" {{
- inline = [
- {commands},
- ]
- connection {{
- type = "ssh"
- host = "{h}"
- user = "root"
- private_key = file("{k}")
- agent = "false"
- }}
- }}
-
- '''), ' ').format(ca_cmd=ca_cmd, commands=commands, h=vm.fqdn, k=self.private_key_rel)
-
- # Destroy actions with puppet
- cmd1 = "ssh -o StrictHostKeyChecking=no {ma} "
- cmd1 += "'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
- cmd1 = cmd1.format(ma=self.config.puppetmaster, h=vm.fqdn)
-
- cmd2 = "ssh -o StrictHostKeyChecking=no {ca} "
- cmd2 += "'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
- cmd2 = cmd2.format(ca=self.config.puppetca, h=vm.fqdn)
-
- content += textwrap.indent(textwrap.dedent('''\
- provisioner "remote-exec" {{
- inline = [
- "/usr/bin/systemctl stop puppet.service || true",
- ]
- when = destroy
- connection {{
- type = "ssh"
- host = "{h}"
- user = "root"
- }}
- }}
-
- provisioner "local-exec" {{
- command = "{cmd1}"
- when = destroy
- }}
-
- provisioner "local-exec" {{
- command = "{cmd2}"
- when = destroy
- }}
-
- '''), ' ').format(cmd1=cmd1, cmd2=cmd2, h=vm.fqdn, k=self.private_key_rel)
-
- return content
-
- # -------------------------------------------------------------------------
- def _only_update_packages(self, vm):
-
- command_list = [
- "chmod +x /tmp/update-all-packages",
- "/tmp/update-all-packages",
- "rm -fv /tmp/update-all-packages",
- ]
- if vm.is_rhel:
- command_list.append("chmod +x /tmp/update-networkmanager")
- command_list.append("/tmp/update-networkmanager -v")
- command_list.append("rm -fv /tmp/update-networkmanager")
- command_list.append("rm -fv /tmp/functions.rc")
- commands = ',\n '.join('"' + x + '"' for x in command_list)
-
- content = textwrap.indent(textwrap.dedent('''\
- provisioner "remote-exec" {{
- inline = [
- {commands},
- ]
- connection {{
- type = "ssh"
- host = "{h}"
- user = "root"
- private_key = file("{k}")
- agent = "false"
- }}
- }}
-
- '''), ' ').format(commands=commands, h=vm.fqdn, k=self.private_key_rel)
-
- return content
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A mixin for the handler module for early used methods.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import logging
-
-from distutils.version import LooseVersion
-
-from subprocess import PIPE
-
-# Third party modules
-from fb_tools.errors import HandlerError, ExpectedHandlerError, CommandNotFoundError
-
-from fb_pdnstools.server import PowerDNSServer
-from fb_pdnstools.errors import PowerDNSHandlerError
-
-# Own modules
-from ..config import CrTfConfiguration
-
-from ..errors import AbortExecution
-
-from ..terraform.vm import TerraformVm
-
-from ..terraform.disk import TerraformDisk
-
-from ..xlate import XLATOR
-
-__version__ = '0.1.0'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfHandlerFirstMixin():
- """A Mixin module for the handler module for early used methods."""
-
- # -------------------------------------------------------------------------
- def incr_verbosity(self, diff=1):
-
- new_verbose = self.verbose + int(diff)
- if new_verbose < 0:
- new_verbose = 0
- self.verbose = new_verbose
-
- if self.pdns:
- self.pdns.verbose = self.verbose
-
- for vname in self.vsphere:
- self.vsphere[vname].verbose = self.verbose
-
- # -------------------------------------------------------------------------
- def init_handlers(self):
-
- if not self.config:
- msg = _("No configuration given before initialisation of handlers.")
- raise HandlerError(msg)
-
- if not isinstance(self.config, CrTfConfiguration):
- raise HandlerError(_(
- "{n} is not a {e}-instance, but a {w}-instance instead.").format(
- n='self.config', e='CrTfConfiguration', w=self.config.__class__.__name__))
-
- TerraformDisk.default_size = self.config.disk_size
- TerraformDisk.min_size_gb = self.config.disk_min_size
- TerraformDisk.max_size_gb = self.config.disk_max_size
-
- TerraformVm.min_rootdisk_size = self.config.root_min_size
- TerraformVm.max_rootdisk_size = self.config.root_max_size
-
- LOG.info(_("Initialize some additional handlers."))
-
- self.terraform_cmd = self.get_command('terraform', quiet=True)
- if not self.terraform_cmd:
- raise CommandNotFoundError('terraform')
- self.check_terraform_version()
-
- self.pdns = PowerDNSServer(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- master_server=self.config.pdns_master_server,
- port=self.config.pdns_api_port, key=self.config.pdns_api_key,
- use_https=self.config.pdns_api_use_https, path_prefix=self.config.pdns_api_path_prefix,
- simulate=self.simulate, force=self.force, initialized=True,
- )
-
- if not self.config.no_pdns:
- try:
- api_version = self.pdns.get_api_server_version() # noqa
- except (PowerDNSHandlerError, ConnectionError) as e:
- msg = "{c}: {e}".format(c=e.__class__.__name__, e=str(e))
- raise ExpectedHandlerError(msg)
-
- # -------------------------------------------------------------------------
- def check_terraform_version(self):
- """ Checking, that the called terraform has a minimum version."""
-
- tf_timeout = 10
-
- got_tf_version = None
- LOG.info(_("Checking the terraform version ..."))
-
- cmd = [str(self.terraform_cmd), 'version']
- cmd_str = ' '.join(cmd)
- LOG.debug(_("Executing {!r} ...").format(cmd_str))
- result = self.run(
- cmd, may_simulate=False, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
- LOG.debug(_("Completed process:") + "\n" + str(result))
-
- if not result.stdout:
- msg = _("No output on command {!r}.").format(cmd_str)
- raise ExpectedHandlerError(msg)
- lines = result.stdout.splitlines()
-
- if self.verbose > 2:
- LOG.debug(_("First line:") + '\n' + lines[0])
- match = self.re_tf_version.search(lines[0])
- if not match:
- msg = _("Could not evaluate version output of terraform:") + '\n' + result.stdout
- raise ExpectedHandlerError(msg)
-
- got_tf_version = LooseVersion(match.group(1))
- LOG.info(_("Terraform version: {!r}.").format(str(got_tf_version)))
-
- if self.min_version_terraform:
- LOG.debug(_("Checking for {o}{m!r} ...").format(
- o='>=', m=str(self.min_version_terraform)))
- if got_tf_version < self.min_version_terraform:
- msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
- c=str(got_tf_version), o='>=', m=str(self.min_version_terraform))
- raise ExpectedHandlerError(msg)
-
- if self.max_version_terraform:
- LOG.debug(_("Checking for {o}{m!r} ...").format(
- o='<=', m=str(self.max_version_terraform)))
- if got_tf_version > self.max_version_terraform:
- msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
- c=str(got_tf_version), o='<=', m=str(self.max_version_terraform))
- raise ExpectedHandlerError(msg)
-
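- # Illustration of the version comparison used above:
- #
- # >>> from distutils.version import LooseVersion
- # >>> LooseVersion('1.6.5') <= LooseVersion('1.7.5') <= LooseVersion('1.9.0')
- # True
- #
- # Note: distutils (and LooseVersion with it) is deprecated since Python 3.10;
- # packaging.version.Version is the usual replacement.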
- # -------------------------------------------------------------------------
- def first_call(self, yaml_file):
- """First steps until reading the YAML file."""
-
- if not self.initialized:
- raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
-
- try:
-
- self.exec_init_run()
-
- LOG.info(_("Go ahead..."))
-
- self.exec_read_yaml(yaml_file)
-
- print()
- LOG.info(_("Initialising VSPhere handlers."))
- self.init_vspheres(yaml_file)
-
- return True
-
- except AbortExecution as e:
- LOG.warning(str(e))
- return False
-
- # -------------------------------------------------------------------------
- def exec_init_run(self):
-
- if self.stop_at_step == 'init':
- self.incr_verbosity()
-
- if self.verbose > 2:
- LOG.debug(_("Current {} object:").format(self.__class__.__name__) + "\n" + str(self))
-
- LOG.info(_("Finished step {!r}.").format('init'))
- if self.stop_at_step == 'init':
- raise AbortExecution('init')
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A mixin module providing handler methods for reading and evaluating YAML files.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import logging
-
-# Third party modules
-import yaml
-import six
-
-from fb_tools.common import pp, to_bool
-from fb_tools.errors import ExpectedHandlerError
-
-# Own modules
-from ..errors import AbortExecution
-
-from ..terraform.vm import TerraformVm
-
-from ..xlate import XLATOR
-
-__version__ = '0.1.1'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfHandlerReadMixin():
- """A mixin module for the handler module for reading and evaluation."""
-
- # -------------------------------------------------------------------------
- def exec_read_yaml(self, yaml_file):
-
- if self.stop_at_step == 'read-yaml':
- self.incr_verbosity()
-
- self.read_yaml_data(yaml_file)
- self.eval_yaml_data()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in evaluation of YAML data of {f!r}.",
- "Found {n} errors in evaluation of YAML data of {f!r}.",
- self.eval_errors).format(n=self.eval_errors, f=str(yaml_file))
- raise ExpectedHandlerError(msg)
-
- if not self.vms:
- msg = _("Did not found any VMs to deploy in file {!r}.").format(str(yaml_file))
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('read-yaml'))
- if self.stop_at_step == 'read-yaml':
- raise AbortExecution('read-yaml')
-
- # -------------------------------------------------------------------------
- def read_yaml_data(self, yaml_file):
-
- LOG.info(_("Reading YAML file {!r} ...").format(str(yaml_file)))
-
- open_opts = {}
- if six.PY3 and self.config.encoding:
- open_opts['encoding'] = self.config.encoding
- open_opts['errors'] = 'surrogateescape'
-
- try:
- with open(str(yaml_file), 'r', **open_opts) as fh:
- self.yaml_data = yaml.full_load(fh)
- except yaml.YAMLError as e:
- msg = _("Error in YAML file {f!r}: {e}.").format(
- f=str(yaml_file), e=e)
- if hasattr(e, 'problem_mark'):
- mark = e.problem_mark
- msg += " " + _("Error position: {li}:{c}").format(
- li=mark.line + 1, c=mark.column + 1)
- raise ExpectedHandlerError(msg)
-
- if self.verbose > 2:
- LOG.debug(_("Read data from YAML file:") + "\n" + pp(self.yaml_data))
-
- if not isinstance(self.yaml_data, dict):
- msg = _(
- "Data read from YAML file {f!r} are not a dictionary, "
- "but a {c} object instead.").format(
- f=str(yaml_file), c=self.yaml_data.__class__.__name__)
- raise ExpectedHandlerError(msg)
-
- for key in self.yaml_data.keys():
- if key.lower() == 'simulate':
- self.simulate = to_bool(self.yaml_data[key])
-
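- # A minimal reading sketch using the same open options; file name and encoding
- # here are hypothetical:
- #
- #     import yaml
- #     with open('project.yaml', 'r', encoding='utf-8', errors='surrogateescape') as fh:
- #         data = yaml.full_load(fh)
- #     assert isinstance(data, dict)
-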
- # -------------------------------------------------------------------------
- def eval_yaml_data(self):
-
- self.vm_names = []
-
- # Searching for default VM definition
- LOG.debug(_("Searching for default VM definition ..."))
- for key in self.yaml_data.keys():
-
- if self.re_default.match(key):
- vm = self._eval_tpl_vm(name='Default VM', vm_def=self.yaml_data[key])
- if vm:
- self.default_vm = vm
-
- # Searching for VM definitions
- LOG.debug(_("Searching for VM definitions ..."))
- for key in self.yaml_data.keys():
- if self.re_vm_key.match(key):
- for vm_def in self.yaml_data[key]:
- vm = self._eval_vm(vm_def, template_vm=self.default_vm)
- if vm:
- self.vms.append(vm)
-
- # Searching for groups
- for key in self.yaml_data.keys():
- if self.re_group.match(key):
- self._eval_vm_groups(self.yaml_data[key], template_vm=self.default_vm, depth=1)
-
- if self.verbose > 2:
- vm_list = []
- for vm in self.vms:
- vm_list.append(vm.as_dict())
- LOG.debug(_("Evaluated VMs:") + "\n" + pp(vm_list))
-
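- # A hypothetical project layout matching the evaluation above: a defaults
- # section, a list of VM definitions and optionally nested groups. The exact
- # key spellings are defined by the re_* attributes of the handler:
- #
- #     defaults:
- #       vsphere: 'vs-test'
- #     vms:
- #       - name: 'web01.example.com'
- #     groups:
- #       - name: 'databases'
- #         vms:
- #           - name: 'db01.example.com'
-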
- # -------------------------------------------------------------------------
- def _eval_tpl_vm(self, name, vm_def, template_vm=None):
-
- try:
- vm = TerraformVm.from_def(
- vm_def, name=name, is_template=True, template_vm=template_vm, appname=self.appname,
- verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
- force=self.force, terminal_has_colors=self.terminal_has_colors)
- except Exception as e:
- if self.verbose > 2:
- self.handle_error(str(e), e.__class__.__name__, True)
- else:
- LOG.error(_("{c} in evaluating template VM: {e}").format(
- c=e.__class__.__name__, e=e))
- self.eval_errors += 1
- return None
-
- if self.verbose > 2:
- LOG.debug(_(
- "Defined Terraform Template VM {n!r}:").format(
- n=vm.name) + "\n" + pp(vm.as_dict()))
-
- return vm
-
- # -------------------------------------------------------------------------
- def _eval_vm(self, vm_def, template_vm=None):
-
- try:
- vm = TerraformVm.from_def(
- vm_def, is_template=False, template_vm=template_vm, appname=self.appname,
- verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
- force=self.force, terminal_has_colors=self.terminal_has_colors)
- except Exception as e:
- if self.verbose > 2:
- self.handle_error(str(e), e.__class__.__name__, True)
- else:
- LOG.error(_("{c} in evaluating VM: {e}").format(c=e.__class__.__name__, e=e))
- self.eval_errors += 1
- return None
-
- if self.verbose > 3:
- LOG.debug(_(
- "Defined Terraform-VM {n!r}:").format(n=vm.name) + "\n" + pp(vm.as_dict()))
-
- if vm.name in self.vm_names:
- LOG.error(_("VM {!r} is already defined.").format(vm.name))
- self.eval_errors += 1
- return None
-
- return vm
-
- # -------------------------------------------------------------------------
- def _eval_vm_groups(self, groups_def, template_vm=None, depth=1):
-
- if not isinstance(groups_def, list):
- msg = _("Group definition list is not a list:") + "\n" + pp(groups_def)
- LOG.error(msg)
- self.eval_errors += 1
- return
-
- if depth >= self.max_groups_depth:
- LOG.warn(_("Maximum recursion depth for VM groups of {} reached.").format(depth))
- return
-
- if self.verbose > 2:
- LOG.debug(_("Evaluating group list:") + "\n" + pp(groups_def))
- if self.verbose > 3:
- LOG.debug(_("Used template: {!r}").format(template_vm))
-
- for group_def in groups_def:
- self._eval_vm_group(group_def, template_vm=template_vm, depth=depth)
-
- # -------------------------------------------------------------------------
- def _eval_vm_group(self, group_def, template_vm=None, depth=1):
-
- if not isinstance(group_def, dict):
- msg = _("VM definition is not a dictionary:") + "\n" + pp(group_def)
- LOG.error(msg)
- self.eval_errors += 1
- return
-
- group_template = template_vm
- group_name = None
-
- # Searching for the group name ..."
- for key in group_def.keys():
- if self.re_group_name.match(key) and str(group_def[key]).strip():
- group_name = str(group_def[key]).strip()
-
- if not group_name:
- LOG.error(_("No group name defined."))
- return
-
- # Searching for group default VM definition
- LOG.debug(_("Searching for group default VM definition in group {!r} ...").format(
- group_name))
- for key in group_def.keys():
-
- if self.re_default.match(key):
- vm_name = 'Default VM group {!r}'.format(group_name)
- vm = self._eval_tpl_vm(
- name=vm_name, vm_def=group_def[key], template_vm=template_vm)
- if vm:
- group_template = vm
- break
-
- n = None
- if group_template:
- n = group_template.name
- LOG.debug(_("Used template for creating VMs in group {g!r}: {n!r}").format(
- g=group_name, n=n))
- if self.verbose > 3:
- LOG.debug(_("Used template structure:") + "\n" + pp(group_template.as_dict()))
-
- # Searching for VM definitions
- LOG.debug(_("Searching for VM definitions in group {!r} ...").format(group_name))
- for key in group_def.keys():
- if self.re_vm_key.match(key):
- for vm_def in group_def[key]:
- vm = self._eval_vm(vm_def, template_vm=group_template)
- if vm:
- self.vms.append(vm)
-
- # Searching for nested groups
- for key in group_def.keys():
- if self.re_group.match(key):
- self._eval_vm_groups(
- group_def[key], template_vm=group_template, depth=depth + 1)
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A mixin module providing handler methods for interacting with VMware/VSphere.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import copy
-import logging
-import os
-import re
-import sys
-
-from pathlib import Path
-
-from operator import attrgetter
-
-# Third party modules
-from fb_tools.common import pp
-from fb_tools.errors import HandlerError, ExpectedHandlerError
-from fb_vmware.errors import VSphereExpectedError
-from fb_vmware.config import VSPhereConfigInfo
-from fb_vmware.connect import VsphereConnection
-
-# Own modules
-from ..errors import AbortExecution
-
-from ..xlate import XLATOR
-
-__version__ = '0.1.3'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class CrTfHandlerVmwMixin():
- """A mixin module for the handler module for interacting with VMware/VSphere.."""
-
- # -------------------------------------------------------------------------
- def exec_collect_folders(self, yaml_file):
-
- if self.stop_at_step == 'collect-folders':
- self.incr_verbosity()
-
- LOG.info(_("Collecting all VMWare and local folders ..."))
- LOG.info(_("Get vSphere datacenter ..."))
- for vname in self.vsphere:
- self.vsphere[vname].get_datacenter()
-
- LOG.debug(_("Collecting vSphere folders."))
- self.vsphere_folders = []
- for vm in self.vms:
- if vm.folder:
- if vm.folder not in self.vsphere_folders:
- self.vsphere_folders.append(vm.folder)
- self.vsphere_folders.sort(key=str.lower)
- LOG.debug(_("Collected vSphere folders:") + "\n" + pp(self.vsphere_folders))
-
- # Set project name and directory
- yfile = Path(yaml_file)
- yfile_base = yfile.name
- yfile_dir = yfile.parent.resolve()
- (yfile_stem, yfile_ext) = os.path.splitext(yfile_base)
- self.project_name = yfile_stem
- LOG.info(_("Project name is {!r}.").format(str(self.project_name)))
- self.project_dir = yfile_dir / yfile_stem
- LOG.info(_("Project directory is: {!r}.").format(str(self.project_dir)))
-
- # Evaluating root terraform directory
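- # Walk up at most four parent directories and take the first one containing
- # a '.git' subdirectory as the terraform root directory.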
- if not self.is_venv:
- i = 4
- cdir = copy.copy(self.project_dir).parent
- while i > 0:
- git_dir = cdir / '.git'
- if git_dir.is_dir():
- self._terraform_root_dir = cdir
- break
- i -= 1
- if cdir == cdir.parent:
- break
- cdir = cdir.parent
- if not self._terraform_root_dir:
- msg = _("Did not found root terraform directory above {!r}.").format(
- str(self.project_dir))
- LOG.warn(msg)
-
- LOG.info(_("Full project name: {!r}").format(self.full_project_name))
-
- LOG.info(_("Finished step {!r}.").format('collect-folders'))
- if self.stop_at_step == 'collect-folders':
- raise AbortExecution('collect-folders')
-
- # -------------------------------------------------------------------------
- def init_vspheres(self, yaml_file):
-
- if self.stop_at_step == 'vmw-init':
- self.incr_verbosity()
-
- LOG.debug(_("Initialize VSPhere ..."))
- # Test for multiple VSphere references
- found_vspheres = []
- for vm in self.vms:
- vname = vm.vsphere
- if vname not in found_vspheres:
- found_vspheres.append(vname)
- if len(found_vspheres) > 1:
- yaml_file_rel = os.path.relpath(str(yaml_file), os.getcwd())
- msg = _("There is only one, unique VSPhere definition allowed in a project file.")
- msg += '\n'
- msg += _("In {f!r} were found {nr} different VSPhere definitions:").format(
- f=yaml_file_rel, nr=len(found_vspheres))
- for vname in sorted(found_vspheres, key=str.lower):
- msg += '\n * {!r}'.format(vname)
- raise ExpectedHandlerError(msg)
-
- self._init_vspheres()
-
- LOG.info(_("Finished step {!r}.").format('vmw-init'))
- if self.stop_at_step == 'vmw-init':
- raise AbortExecution('vmw-init')
-
- # -------------------------------------------------------------------------
- def _init_vspheres(self):
-
- for vm in self.vms:
- if vm.vsphere in self.vsphere:
- continue
- vname = vm.vsphere
- LOG.debug(_("Initializing VSphere {!r} ...").format(vname))
- if vname not in self.config.vsphere:
- msg = _("VSPhere {!r} not defined in configuration.").format(vname)
- raise ExpectedHandlerError(msg)
-
- if not self.vsphere_user and self.config.vsphere[vname].user:
- LOG.debug(_("Setting {st} to {what!r}.").format(
- st='handler.vsphere_user', what=self.config.vsphere[vname].user))
- self.vsphere_user = self.config.vsphere[vname].user
- if not self.vsphere_password and self.config.vsphere[vname].password:
- LOG.debug(_("Setting {}.").format('handler.vsphere_password'))
- self.vsphere_password = self.config.vsphere[vname].password
-
- try:
- params = {
- 'appname': self.appname,
- 'verbose': self.verbose,
- 'base_dir': self.base_dir,
- 'simulate': self.simulate,
- 'force': self.force,
- 'terminal_has_colors': self.terminal_has_colors,
- 'initialized': True,
- }
- show_params = copy.copy(params)
-
- connect_info = VSPhereConfigInfo(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- host=self.config.vsphere[vname].host, port=self.config.vsphere[vname].port,
- dc=self.config.vsphere[vname].dc, user=self.vsphere_user,
- password=self.vsphere_password, initialized=True)
-
- params['connect_info'] = connect_info
- show_params['connect_info'] = connect_info.as_dict()
-
- if self.verbose > 1:
- if self.verbose < 5:
- show_params['connect_info']['password'] = '******'
- msg = _("Initialising a {}-object with params:").format('VsphereConnection')
- msg += '\n' + pp(show_params)
- LOG.debug(msg)
-
- vsphere = VsphereConnection(**params)
- self.vsphere[vname] = vsphere
-
- except VSphereExpectedError as e:
- raise ExpectedHandlerError(str(e))
-
- # -------------------------------------------------------------------------
- def test_vsphere_handlers(self):
-
- if self.stop_at_step == 'vmw-test':
- self.incr_verbosity()
-
- for vname in self.vsphere.keys():
-
- try:
-
- vsphere = self.vsphere[vname]
-
- vsphere.get_about()
- if self.verbose > 2:
- msg = _("Created {}-object:").format('VsphereConnection')
- msg += '\n' + pp(vsphere.as_dict())
- LOG.debug(msg)
-
- except VSphereExpectedError as e:
- raise ExpectedHandlerError(str(e))
-
- LOG.info(_("Finished step {!r}.").format('vmw-test'))
- if self.stop_at_step == 'vmw-test':
- raise AbortExecution('vmw-test')
-
- # -------------------------------------------------------------------------
- def assign_default_vmw_values(self):
- """Assigning not defined templates and clusters of VMs by their
- appropriate default values."""
-
- LOG.debug(_(
- "Assigning not defined templates and clusters of VMs by their "
- "appropriate default values."))
-
- for vm in self.vms:
-
- if not vm.cluster:
- cl = self.config.vsphere[vm.vsphere].cluster
- if self.verbose > 1:
- LOG.debug(_("Setting cluster of {n!r} to {c!r} ...").format(
- n=vm.name, c=cl))
- vm.cluster = cl
-
- if not vm.vm_template:
- tpl = self.config.vsphere[vm.vsphere].template_name
- if self.verbose > 1:
- LOG.debug(_("Setting template of {n!r} to {t!r} ...").format(
- n=vm.name, t=tpl))
- vm.vm_template = tpl
-
- # -------------------------------------------------------------------------
- def exec_vmw_clusters(self):
-
- if self.stop_at_step == 'vmw-clusters':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for clusters in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_clusters()
-
- LOG.info(_("Finished step {!r}.").format('vmw-clusters'))
- if self.stop_at_step == 'vmw-clusters':
- raise AbortExecution('vmw-clusters')
-
- # -------------------------------------------------------------------------
- def exec_vmw_datastores(self):
-
- if self.stop_at_step == 'vmw-datastores':
- self.incr_verbosity()
-
- nr_total = 0
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for datastores in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_datastores()
- nr_total += len(self.vsphere[vname].datastores.keys())
-
- if nr_total:
- msg = ngettext("Found one datastore.", "Found {n} datastores.", nr_total)
- LOG.debug(msg.format(n=nr_total))
- else:
- LOG.error(_("No VSPhere datastores found."))
-
- LOG.info(_("Finished step {!r}.").format('vmw-datastores'))
- if self.stop_at_step == 'vmw-datastores':
- raise AbortExecution('vmw-datastores')
-
- # -------------------------------------------------------------------------
- def exec_vmw_ds_clusters(self):
-
- nr_total = 0
-
- if self.stop_at_step == 'vmw-ds-clusters':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for datastore clusters in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_ds_clusters()
- nr_total += len(self.vsphere[vname].ds_clusters.keys())
-
- if nr_total:
- msg = ngettext(
- "Found one datastore cluster.",
- "Found {n} datastore clusters.",
- nr_total)
- LOG.debug(msg.format(n=nr_total))
- else:
- LOG.warn(_("No VSPhere datastore clusters found."))
-
- LOG.info(_("Finished step {!r}.").format('vmw-ds-clusters'))
- if self.stop_at_step == 'vmw-ds-clusters':
- raise AbortExecution('vmw-ds-clusters')
-
- # -------------------------------------------------------------------------
- def exec_vmw_networks(self):
-
- if self.stop_at_step == 'vmw-networks':
- self.incr_verbosity()
-
- for vname in self.vsphere:
- LOG.debug(_("Searching for networks in VSPhere {!r} ...").format(vname))
- self.vsphere[vname].get_networks()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring vSphere {v!r} resources.",
- "Found {n} errors in exploring vSphere {v!r} resources.",
- self.eval_errors).format(n=self.eval_errors, v=vname)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('vmw-networks'))
- if self.stop_at_step == 'vmw-networks':
- raise AbortExecution('vmw-networks')
-
- # -------------------------------------------------------------------------
- def exec_vmw_templates(self):
-
- if self.stop_at_step == 'vmw-templates':
- self.incr_verbosity()
-
- self.explore_vsphere_templates()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in exploring vSphere templates.",
- "Found {n} errors in exploring vSphere templates.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('vmw-templates'))
- if self.stop_at_step == 'vmw-templates':
- raise AbortExecution('vmw-templates')
-
- # -------------------------------------------------------------------------
- def exec_validate_yaml(self):
-
- if self.stop_at_step == 'validate-yaml':
- self.incr_verbosity()
-
- print()
- LOG.info(_("Validating information from YAML file ..."))
-
- self.validate_clusters()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating vSphere computing clusters.",
- "Found {n} errors in validating vSphere computing clusters.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- self.get_all_vms()
- self.validate_vms()
-
- LOG.info(_("Finished step {!r}.").format('validate-yaml'))
- if self.stop_at_step == 'validate-yaml':
- raise AbortExecution('validate-yaml')
-
- # -------------------------------------------------------------------------
- def get_all_vms(self):
-
- LOG.info(_("Got a list of all VMs and templates ..."))
- self.all_vms = {}
- re_vm = re.compile(r'.*')
-
- for vs_name in self.vsphere:
-
- if vs_name not in self.all_vms:
- self.all_vms[vs_name] = {}
-
- vm_list = self.vsphere[vs_name].get_vms(re_vm, name_only=True)
- for vm_tuple in vm_list:
- vm_name = vm_tuple[0]
- vm_path = vm_tuple[1]
- if vm_name in self.all_vms[vs_name]:
- self.all_vms[vs_name][vm_name].append(vm_path)
- else:
- self.all_vms[vs_name][vm_name] = [vm_path]
-
- if self.verbose > 2:
- msg = _("All existing VMs and templates:")
- msg += '\n' + pp(self.all_vms)
- LOG.debug(msg)
-
- # -------------------------------------------------------------------------
- def exec_validate_storage(self):
-
- if self.stop_at_step == 'validate-storage':
- self.incr_verbosity()
-
- self.validate_storages()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating VM storages.",
- "Found {n} errors in validating VM storages.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-storage'))
- if self.stop_at_step == 'validate-storage':
- raise AbortExecution('validate-storage')
-
- # -------------------------------------------------------------------------
- def exec_validate_iface(self):
-
- if self.stop_at_step == 'validate-iface':
- self.incr_verbosity()
-
- self.validate_interfaces()
- if self.eval_errors:
- msg = ngettext(
- "Found one error in validating VM interfaces.",
- "Found {n} errors in validating VM interfaces.",
- self.eval_errors).format(n=self.eval_errors)
- raise ExpectedHandlerError(msg)
-
- LOG.info(_("Finished step {!r}.").format('validate-iface'))
- if self.stop_at_step == 'validate-iface':
- raise AbortExecution('validate-iface')
-
- # -------------------------------------------------------------------------
- def exec_vsphere_folders(self):
-
- if self.stop_at_step == 'ensure-vmw-folders':
- self.incr_verbosity()
-
- self.ensure_vsphere_folders()
-
- LOG.info(_("Finished step {!r}.").format('ensure-vmw-folders'))
- if self.stop_at_step == 'ensure-vmw-folders':
- raise AbortExecution('ensure-vmw-folders')
-
- # -------------------------------------------------------------------------
- def explore_vsphere_templates(self):
-
- LOG.info(_("Exploring all vSphere templates ..."))
-
- for vname in self.vsphere:
-
- if vname not in self.vsphere_templates:
- self.vsphere_templates[vname] = {}
-
- self.config.vsphere[vname].used_templates = []
-
- for vm in self.vms:
- template_name = vm.vm_template
- if template_name:
- if template_name not in self.config.vsphere[vname].used_templates:
- self.config.vsphere[vname].used_templates.append(template_name)
- else:
- LOG.error(_("VM {!r} has not template defined.").format(vm.name))
- self.eval_errors += 1
-
- msg = _("All {} VSPhere templates to explore:").format(vname)
- msg += "\n" + pp(self.config.vsphere[vname].used_templates)
- LOG.debug(msg)
-
- for template_name in self.config.vsphere[vname].used_templates:
-
- if template_name in self.vsphere_templates[vname]:
- continue
-
- LOG.debug(_("Searching for template {t!r} in VSPhere {v!r} ...").format(
- t=template_name, v=vname))
- re_vm = re.compile(r'^' + re.escape(template_name) + r'$', re.IGNORECASE)
- vm_list = self.vsphere[vname].get_vms(re_vm, as_obj=True, stop_at_found=True)
- if vm_list:
- vm = vm_list[0]
- tname = vm.name.lower()
- if tname not in self.vsphere_templates[vname]:
- self.vsphere_templates[vname][template_name] = vm
- else:
- LOG.error(_("Template {t!r} not found in VSPhere {v!r}.").format(
- t=template_name, v=vname))
- self.eval_errors += 1
-
- if self.verbose > 2:
- msg = _("All explored vSphere templates:")
- out_dict = {}
- for vname in self.vsphere_templates:
- out_dict[vname] = {}
- for tname in self.vsphere_templates[vname]:
- out_dict[vname][tname] = self.vsphere_templates[vname][tname].as_dict()
- msg += "\n" + pp(out_dict)
- LOG.debug(msg)
-
- # -------------------------------------------------------------------------
- def validate_clusters(self):
-
- print()
- LOG.info(_("Validating existence of computing clusters of the VMs."))
-
- clusters = {}
-
- for vm in self.vms:
-
- vname = vm.vsphere
- if vname not in clusters:
- clusters[vname] = {}
-
- if vm.cluster in clusters[vname]:
- clusters[vname][vm.cluster].append(vm.name)
- else:
- clusters[vname][vm.cluster] = [vm.name]
-
- for vname in clusters.keys():
- for cluster in clusters[vname].keys():
-
- vms = clusters[vname][cluster]
-
- cl = str(cluster)
- LOG.debug(_(
- "Checking existence of computing cluster {c!r} in VSPhere {v!r} ...").format(
- c=cl, v=vname))
-
- vsphere = self.vsphere[vname]
- vmw_cluster = vsphere.get_cluster_by_name(cl)
- if vmw_cluster:
- if self.verbose > 1:
- LOG.debug(_(
- "Found computing cluster {cl!r} in VSPhere {v!r} (defined for VMs "
- "{vms}).").format(cl=vmw_cluster.name, v=vname, vms=pp(vms)))
- else:
- LOG.error(_(
- "Computing cluster {cl!r} (defined for VMs {vms}) in VSPhere {v!r} not "
- "found.").format(cl=cl, vms=pp(vms), v=vname))
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------
- def validate_vms(self):
-
- print()
- LOG.info(_("Validating existence of VMs in VMWare."))
- vms2perform = []
-
- for vm in sorted(self.vms, key=attrgetter('tf_name')):
-
- print(" * {} ".format(vm.fqdn), end='', flush=True)
- if self.verbose:
- print()
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- vm_paths = None
- if vs_name in self.all_vms:
- if vm.fqdn in self.all_vms[vs_name]:
- vm_paths = self.all_vms[vs_name][vm.fqdn]
-
- if vm_paths:
- msg = _('[{m}] - VM is already existing in VSphere {v!r}, path {p!r}.').format(
- m=self.colored('Existing', 'YELLOW'), v=vs_name, p=pp(vm_paths))
- print(msg, end='', flush=True)
- if self.verbose:
- print()
-
- vm_info = vsphere.get_vm(vm.fqdn, vsphere_name=vs_name, as_obj=True)
- if self.verbose > 2:
- LOG.debug(_("VM info:") + "\n" + pp(vm_info.as_dict(bare=True)))
- ds = vm_info.config_path_storage
- LOG.debug(_("Datastore of VM {vm!r}: {ds!r}.").format(vm=vm.name, ds=ds))
- vm.datastore = ds
- vm.already_existing = True
- self.existing_vms.append(vm_info)
-
- else:
-
- print('[{}] '.format(self.colored('OK', 'GREEN')), end='', flush=True)
- vm.already_existing = False
-
- vms2perform.append(vm)
- print()
-
- self.vms = vms2perform
-
- print()
-
- if not len(self.vms):
- print()
- print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
- print(self.colored('* ' + _('CAUTION!'), ('BOLD', 'RED')), file=sys.stderr)
- print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
- print()
- print(
- self.colored(_('Did not find any VM to deploy!'), ('BOLD', 'RED')),
- file=sys.stderr)
- print()
- raise ExpectedHandlerError(_("No VMs to deploy"))
-
- # -------------------------------------------------------------------------
- def validate_storages(self):
-
- self._validate_ds_clusters()
- self._validate_datastores()
-
- if self.verbose:
- if self.used_dc_clusters:
- out_lines = []
- for vs_name in self.used_dc_clusters:
- for cluster in self.used_dc_clusters[vs_name]:
- out_lines.append(' * VSphere {v!r}: {c}'.format(
- v=vs_name, c=cluster))
- out = '\n'.join(out_lines)
- LOG.debug(_("Used datastore clusters:") + "\n" + out)
- else:
- LOG.debug(_("No datastore clusters are used."))
- if self.used_datastores:
- out_lines = []
- for vs_name in self.used_datastores:
- for ds in self.used_datastores[vs_name]:
- out_lines.append(' * VSphere {v!r}: {ds}'.format(v=vs_name, ds=ds))
- out = '\n'.join(out_lines)
- LOG.debug(_("Used datastors:") + "\n" + out)
- else:
- LOG.debug(_("No datastores are used."))
-
- # -------------------------------------------------------------------------
- def _validate_ds_clusters(self):
-
- LOG.info(_("Validating given datastore clusters of VMs ..."))
-
- for vm in self.vms:
-
- if not vm.ds_cluster:
- continue
-
- self._validate_dscluster_vm(vm)
-
- # -------------------------------------------------------------------------
- def _validate_dscluster_vm(self, vm):
-
- if self.verbose > 2:
- LOG.debug('Disk mappings:' + '\n' + pp(vm.disks._map))
-
- needed_gb = 0.0
- if not vm.already_existing:
- for unit_number in vm.disks.keys():
- disk = vm.disks[unit_number]
- needed_gb += disk.size_gb
-
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- found = False
- for cluster_name in vsphere.ds_clusters.keys():
- if cluster_name.lower() == vm.ds_cluster.lower():
- if self.verbose > 2:
- LOG.debug(_(
- "Found datastore cluster {c!r} in VSphere {v!r} for VM {n!r}.").format(
- n=vm.name, v=vs_name, c=vm.ds_cluster))
- if vm.ds_cluster != cluster_name:
- LOG.debug(_("Setting datastore cluster for VM {n!r} to {c!r} ...").format(
- n=vm.name, c=cluster_name))
- vm.ds_cluster = cluster_name
- ds_cluster = vsphere.ds_clusters[cluster_name]
- if self.verbose > 2:
- LOG.debug(_(
- "Free space of cluster {c!r} in VSphere {v!r} before provisioning: "
- "{a:0.1f} GiB.").format(
- c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
- if ds_cluster.avail_space_gb < needed_gb:
- LOG.error(_(
- "Datastore cluster {d!r} in VSphere {v!r} has not sufficient space for "
- "storage of VM {vm!r} (needed {n:0.1f} GiB, available {a:0.1f} "
- "GiB).").format(
- d=cluster_name, v=vs_name, vm=vm.name, n=needed_gb,
- a=ds_cluster.avail_space_gb))
- self.eval_errors += 1
- else:
- ds_cluster.calculated_usage += needed_gb
- if self.verbose > 1:
- LOG.debug(_(
- "Free space in cluster {c!r} in VSphere {v!r} after provisioning: "
- "{a:0.1f} GiB.").format(
- c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
- found = True
- if vs_name not in self.used_dc_clusters:
- self.used_dc_clusters[vs_name] = []
- if cluster_name not in self.used_dc_clusters[vs_name]:
- self.used_dc_clusters[vs_name].append(cluster_name)
- break
-
- if not found:
- LOG.error(_("Datastore cluster {c!r} of VM {n!r} not found in VSphere {v!r}.").format(
- n=vm.name, c=vm.ds_cluster, v=vs_name))
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------
- def _validate_datastores(self):
-
- LOG.info(_("Validating given datastores of VMs and assign failing ..."))
-
- for vm in self.vms:
-
- if vm.ds_cluster:
- if vm.datastore:
- LOG.debug(_("Removing defined datastore {d!r} for VM {n!r} ...").format(
- d=vm.datastore, n=vm.name))
- vm.datastore = None
- continue
-
- self._validate_ds_vm(vm)
-
- # -------------------------------------------------------------------------
- def _validate_ds_vm(self, vm):
-
- needed_gb = 0.0
- if not vm.already_existing:
- for unit_number in vm.disks.keys():
- disk = vm.disks[unit_number]
- needed_gb += disk.size_gb
-
- vs_name = vm.vsphere
- vsphere = self.vsphere[vs_name]
-
- vm_cluster = None
- for cluster in vsphere.clusters:
- if cluster.name.lower() == vm.cluster.lower():
- vm_cluster = cluster
- break
- if not vm_cluster:
- msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
- c=vm.cluster, n=vm.name)
- raise HandlerError(msg)
-
- if vm.datastore:
- found = False
- found_ds_name = None
- for ds_name in vsphere.datastores:
- if ds_name.lower() == vm.datastore.lower():
- if self.verbose > 2:
- LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
- n=vm.name, d=vm.datastore, v=vs_name))
- if ds_name not in vm_cluster.datastores:
- LOG.warn(_("Datastore {d!r} not available in cluster {c!r}.").format(
- d=ds_name, c=vm.cluster))
- break
- if vm.datastore != ds_name:
- LOG.debug(_("Setting datastore for VM {n!r} to {d!r} ...").format(
- n=vm.name, d=ds_name))
- vm.datastore = ds_name
- ds = vsphere.datastores[ds_name]
- if ds.avail_space_gb < needed_gb:
- LOG.error(_(
- "Datastore {d!r} has not sufficient space for storage of VM "
- "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
- d=ds_name, v=vm.name, n=needed_gb, a=ds.avail_space_gb))
- self.eval_errors += 1
- else:
- ds.calculated_usage += needed_gb
- found = True
- found_ds_name = ds_name
- break
- if not found:
- LOG.error(_("Datastore {d!r} of VM {n!r} not found in VSPhere {v!r}.").format(
- n=vm.name, d=vm.datastore, v=vs_name))
- self.eval_errors += 1
- if vs_name not in self.used_datastores:
- self.used_datastores[vs_name] = []
- if found_ds_name not in self.used_datastores[vs_name]:
- self.used_datastores[vs_name].append(found_ds_name)
- return
-
- ds_name = vsphere.datastores.find_ds(
- needed_gb, vm.ds_type, use_ds=copy.copy(vm_cluster.datastores), no_k8s=True)
- if ds_name:
- LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
- d=ds_name, n=vm.name, v=vs_name))
- vm.datastore = ds_name
- if vs_name not in self.used_datastores:
- self.used_datastores[vs_name] = []
- if ds_name not in self.used_datastores[vs_name]:
- self.used_datastores[vs_name].append(ds_name)
- else:
- self.eval_errors += 1
-
- # -------------------------------------------------------------------------
- def validate_interfaces(self):
-
- LOG.info(_("Validating interfaces of VMs and assign networks ..."))
- for vm in self.vms:
- self._validate_interfaces_vm(vm)
-
- if self.verbose > 2:
- LOG.debug(_("Validated FQDNs:") + "\n" + pp(self.fqdns))
- LOG.debug(_("Validated Addresses:") + "\n" + pp(self.addresses))
-
- if self.verbose:
-
- lines = []
- for vs_name in self.used_networks:
- for nw in self.used_networks[vs_name]:
- lines.append(' * VSphere {v!r}: {n}'.format(
- v=vs_name, n=nw))
- out = '\n'.join(lines)
- LOG.debug(_("Used networks:") + "\n" + out)
-
- lines = []
- for pair in self.dns_mapping['forward']:
- line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
- lines.append(line)
- LOG.debug(_("Used forward DNS entries:") + "\n" + '\n'.join(lines))
-
- lines = []
- for pair in self.dns_mapping['reverse']:
- line = ' * {a!r} => {n!r}'.format(n=pair[1], a=str(pair[0]))
- lines.append(line)
- LOG.debug(_("Used reverse DNS entries:") + "\n" + '\n'.join(lines))
-
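- # The collected DNS mapping consists of two lists of 2-tuples, appended in
- # _validate_interface_of_vm(); the entries shown here are hypothetical:
- #
- #     self.dns_mapping['forward']  # [('web01.example.com', IPv4Address('192.0.2.10')), ...]
- #     self.dns_mapping['reverse']  # [(IPv4Address('192.0.2.10'), 'web01.example.com'), ...]
-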
- # -------------------------------------------------------------------------
- def _validate_interfaces_vm(self, vm):
-
- vs_name = vm.vsphere
- LOG.debug(_("Checking interfaces of VM {n!r} in VSPhere {v!r} ...").format(
- n=vm.name, v=vs_name))
-
- if not vm.interfaces:
- LOG.error(_("No interfaces defined for VM {!r}.").format(vm.name))
- self.eval_errors += 1
- return
-
- vsphere = self.vsphere[vs_name]
-
- vm_cluster = None
- for cluster in vsphere.clusters:
- if cluster.name.lower() == vm.cluster.lower():
- vm_cluster = cluster
- break
- if not vm_cluster:
- msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
- c=vm.cluster, n=vm.name)
- raise HandlerError(msg)
-
- i = -1
- for iface in vm.interfaces:
- i += 1
- self._validate_interface_of_vm(
- vm_name=vm.name, iface=iface, vs_name=vs_name, vm_cluster=vm_cluster, i=i)
-
- # -------------------------------------------------------------------------
- def _validate_interface_of_vm(self, vm_name, iface, vs_name, vm_cluster, i=0):
-
- vsphere = self.vsphere[vs_name]
-
- if self.verbose > 1:
- LOG.debug(_("Checking interface {i} of VM {n!r} ...").format(
- i=i, n=vm_name))
-
- if not iface.address:
- LOG.error(_("Interface {i} of VM {n!r} has no defined address.").format(
- i=i, n=vm_name))
- self.eval_errors += 1
- return
-
- if not iface.fqdn:
- LOG.error(_("Interface {i} of VM {n!r} has no defined FQDN.").format(
- i=i, n=vm_name))
- self.eval_errors += 1
- return
-
- if iface.fqdn in self.fqdns:
- LOG.error(_(
- "FQDN {f!r} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- f=iface.fqdn, va=self.fqdns[iface.fqdn][0], ia=self.fqdns[iface.fqdn][1],
- ib=i, vb=vm_name))
- self.eval_errors += 1
- return
-
- self.fqdns[iface.fqdn] = (vm_name, i)
-
- if iface.address_v4:
- if iface.address_v4 in self.addresses:
- LOG.error(_(
- "IPv4 address {a} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- a=iface.address_v4, va=self.fqdns[iface.fqdn][0],
- ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
- self.eval_errors += 1
- return
- self.addresses[iface.address_v4] = (vm_name, i)
- pair = (iface.fqdn, iface.address_v4)
- self.dns_mapping['forward'].append(pair)
- pair = (iface.address_v4, iface.fqdn)
- self.dns_mapping['reverse'].append(pair)
-
- if iface.address_v6:
- if iface.address_v6 in self.addresses:
- LOG.error(_(
- "IPv6 address {a} already defined for VM {va!r}({ia}) should be set "
- "for interface {ib} of {vb!r}.").format(
- a=iface.address_v6, va=self.fqdns[iface.fqdn][0],
- ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
- self.eval_errors += 1
- return
- self.addresses[iface.address_v6] = (vm_name, i)
- pair = (iface.fqdn, iface.address_v6)
- self.dns_mapping['forward'].append(pair)
- pair = (iface.address_v6, iface.fqdn)
- self.dns_mapping['reverse'].append(pair)
-
- network = iface.network
- if network:
- if network not in vsphere.networks:
- LOG.error(_(
- "Could not find network {n!r} for VM {v!r}, interface {i}.").format(
- n=network, v=vm_name, i=i))
- self.eval_errors += 1
- return
- else:
- network = vsphere.networks.get_network_for_ip(
- iface.address_v4, iface.address_v6)
- if not network:
- self.eval_errors += 1
- return
- iface.network = network
- LOG.debug(_("Found network {n!r} for interface {i} of VM {v!r}.").format(
- n=network, i=i, v=vm_name))
-
- if network not in vm_cluster.networks:
- LOG.error(_(
- "Network {n!r} for interface {i} of VM {v!r} not available in "
- "cluster {c!r}.").format(n=network, v=vm_name, i=i, c=vm_cluster.name))
- self.eval_errors += 1
- return
- LOG.debug(_("Network {n!r} is available in cluster {c!r}.").format(
- n=network, c=vm_cluster.name))
-
- net = vsphere.networks[network]
- if not iface.gateway:
- LOG.debug(_("Setting gateway of interface {i} of VM {v!r} to {g}.").format(
- i=i, v=vm_name, g=net.gateway))
- iface.gateway = net.gateway
-
- if net.network:
- if net.network.version == 4:
- if iface.netmask_v4 is None:
- iface.netmask_v4 = net.network.prefixlen
- else:
- if iface.netmask_v6 is None:
- iface.netmask_v6 = net.network.prefixlen
-
- if vs_name not in self.used_networks:
- self.used_networks[vs_name] = []
- if network not in self.used_networks[vs_name]:
- self.used_networks[vs_name].append(network)
-
- # -------------------------------------------------------------------------
- def ensure_vsphere_folders(self):
-
- vs_name = None
- for vs_name in self.vsphere.keys():
- break
- vsphere = self.vsphere[vs_name]
-
- print()
- LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
- vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-../../locale
\ No newline at end of file
+++ /dev/null
-#!/bin/env python3
-# -*- coding: utf-8 -*-
-
-__version__ = '1.0.0'
-
-# vim: ts=4 et list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: The module for a VM disk intended for Terraform.
-"""
-from __future__ import absolute_import
-
-# Standard modules
-import logging
-import copy
-
-try:
- from collections.abc import MutableMapping
-except ImportError:
- from collections import MutableMapping
-
-from numbers import Number
-
-# Third party modules
-
-# Own modules
-from fb_tools.obj import FbBaseObject
-
-from ..config import CrTfConfiguration
-
-from ..errors import TerraformVmDefinitionError
-from ..errors import TerraformVmTooManyDisksError
-
-from ..xlate import XLATOR
-
-__version__ = '1.3.0'
-
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class TerraformDisk(FbBaseObject):
- """A class encapsulating a disk of a VirtualMachine managed by Terraform."""
-
- default_size = CrTfConfiguration.default_disk_size
-
- min_size_gb = CrTfConfiguration.default_disk_min_size
- max_size_gb = CrTfConfiguration.default_disk_max_size
-
- disks_per_scsi_ctrlr = 15
- max_scsi_ctrlrs = 4
- max_scsi_disks = disks_per_scsi_ctrlr * max_scsi_ctrlrs
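- # i.e. at most 4 * 15 = 60 SCSI disks per VM in total.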
-
- msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
-
- # -------------------------------------------------------------------------
- def __init__(
- self, name=None, root_disk=False, unit_number=0, size_gb=None,
- version=__version__, initialized=False, *args, **kwargs):
-
- self._name = 'disk'
- self._root_disk = bool(root_disk)
- self._unit_number = 0
- self._size_gb = self.default_size
-
- super(TerraformDisk, self).__init__(
- version=version,
- initialized=False,
- *args, **kwargs,
- )
-
- if name:
- self.name = name
-
- self._set_unit_number(unit_number)
- if size_gb is not None:
- self.size_gb = size_gb
-
- self.initialized = initialized
-
- # -----------------------------------------------------------
- @property
- def name(self):
- """The name of the disk."""
- return self._name
-
- @name.setter
- def name(self, value):
- if value is None:
- msg = _("The name of a disk don't may be None.")
- raise TerraformVmDefinitionError(msg)
- v = str(value).strip()
- if v == '':
- msg = _("The name of a disk don't may be empty.")
- raise TerraformVmDefinitionError(msg)
- self._name = v
-
- # -----------------------------------------------------------
- @property
- def root_disk(self):
- """A flag indicating, that this is the root disk of a VM."""
- return self._root_disk
-
- @root_disk.setter
- def root_disk(self, value):
- self._root_disk = bool(value)
-
- # -----------------------------------------------------------
- @property
- def unit_number(self):
- """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
- return self._unit_number
-
- # -----------------------------------------------------------
- @property
- def size_gb(self):
- """Size of the disk in GiB."""
- return self._size_gb
-
- @size_gb.setter
- def size_gb(self, value):
- val = float(value)
- msg = _("Invalid disk size {n} - size must be {min} <= SIZE <= {max}.").format(
- n=val, min=self.min_size_gb, max=self.max_size_gb)
- if val < self.min_size_gb or val > self.max_size_gb:
- raise ValueError(msg)
- self._size_gb = val
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(TerraformDisk, self).as_dict(short=short)
- res['name'] = self.name
- res['default_size'] = self.default_size
- res['max_size_gb'] = self.max_size_gb
- res['min_size_gb'] = self.min_size_gb
- res['root_disk'] = self.root_disk
- res['size_gb'] = self.size_gb
- res['unit_number'] = self.unit_number
-
- return res
-
- # -------------------------------------------------------------------------
- def __repr__(self):
- """Typecast into a string for reproduction."""
- out = '<%s(' % (self.__class__.__name__)
-
- fields = []
- fields.append('name={!r}'.format(self.name))
- fields.append('root_disk={!r}'.format(self.root_disk))
- fields.append('unit_number={!r}'.format(self.unit_number))
- fields.append('size_gb={!r}'.format(self.size_gb))
- fields.append('appname={!r}'.format(self.appname))
- fields.append('verbose={!r}'.format(self.verbose))
- fields.append('base_dir={!r}'.format(self.base_dir))
- fields.append('initialized={!r}'.format(self.initialized))
-
- out += ', '.join(fields) + ')>'
- return out
-
- # -------------------------------------------------------------------------
- def _set_unit_number(self, value):
- val = int(value)
- if self.root_disk:
- self._unit_number = 0
- if val != 0:
- msg = _("A root disk must have always the unit number 0 (given {!r}).").format(
- value)
- raise ValueError(msg)
- return
- msg = _("Invalid unit number {n} - number must be {min} <= NUMBER <= {max}.").format(
- n=val, min=1, max=64)
- if val < 1 or val > 64:
- raise ValueError(msg)
-
- self._unit_number = val
-
- # -------------------------------------------------------------------------
- def __copy__(self):
-
- if self.verbose > 3:
- LOG.debug(_("Copying Terraform disk object with unit ID {}.").format(self.unit_number))
-
- disk = self.__class__(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir, name=self.name,
- initialized=self.initialized, root_disk=self.root_disk, unit_number=self.unit_number,
- size_gb=self.size_gb)
-
- return disk
-
- # -------------------------------------------------------------------------
- def __eq__(self, other):
-
- if not isinstance(other, TerraformDisk):
- raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDisk'))
-
- if self.name != other.name:
- return False
- if self.unit_number != other.unit_number:
- return False
- if self.root_disk != other.root_disk:
- return False
- if self.size_gb != other.size_gb:
- return False
-
- return True
-
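- # A short usage sketch with hypothetical values (assuming the configured disk
- # size limits permit them; common base-object arguments are omitted):
- #
- #     root_disk = TerraformDisk(name='disk0', root_disk=True, unit_number=0, size_gb=32)
- #     data_disk = TerraformDisk(name='disk1', unit_number=1, size_gb=100)
- #     assert root_disk != data_disk
-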
-
-# =============================================================================
-class TerraformDiskDict(MutableMapping, FbBaseObject):
- """
- A dictionary containing TerraformDisk objects.
- It works like a dict.
- i.e.:
- * disks = TerraformDiskDict(TerraformDisk(name='disk0',unit_number=0, root=True, size_gb=48, ...))
- * disks[0] returns the first TerraformDisk object in the list of sorted disk names
- * disks['disk0'] returns the TerraformDisk object with the name 'disk0'.
- """
-
- msg_invalid_disk_type = _("Invalid disk type {{!r}} to set, only {} allowed.").format(
- 'TerraformDisk')
- msg_key_not_name = _("The key {k!r} must be equal to the name of the disk {n!r}.")
- msg_none_type_error = _("None type as key is not allowed.")
- msg_empty_key_error = _("Empty key {!r} is not allowed.")
- msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
- *disks):
-
- self._map = dict()
-
- super(TerraformDiskDict, self).__init__(
- appname=appname, verbose=verbose, base_dir=base_dir, initialized=False)
-
- for disk in disks:
- self.append(disk)
-
- if initialized:
- self.initialized = True
-
- # -------------------------------------------------------------------------
- def _set_item(self, key, disk):
-
- if not isinstance(disk, TerraformDisk):
- raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
-
- if disk.name != key:
- msg = self.msg_key_not_name.format(k=key, n=disk.name)
- raise KeyError(msg)
-
- self._map[key] = disk
-
- # -------------------------------------------------------------------------
- def append(self, disk):
-
- if not isinstance(disk, TerraformDisk):
- raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
-
- self._set_item(disk.name, disk)
-
- # -------------------------------------------------------------------------
- def _get_item(self, key):
-
- if key is None:
- raise TypeError(self.msg_none_type_error)
-
- if isinstance(key, Number):
- num = int(key)
- keys = self.keys()
- name = keys[num]
- return self._map[name]
-
- return self._map[key]
-
- # -------------------------------------------------------------------------
- def get(self, key):
- return self._get_item(key)
-
- # -------------------------------------------------------------------------
- def _del_item(self, key, strict=True):
-
- if key is None:
- raise TypeError(self.msg_none_type_error)
-
- name = str(key)
- if isinstance(key, Number):
- num = int(key)
- keys = self.keys()
- name = keys[num]
-
- if not strict and name not in self._map:
- return
-
- del self._map[name]
-
- # -------------------------------------------------------------------------
- # The next five methods are requirements of the ABC.
- def __setitem__(self, key, value):
- self._set_item(key, value)
-
- # -------------------------------------------------------------------------
- def __getitem__(self, key):
- return self._get_item(key)
-
- # -------------------------------------------------------------------------
- def __delitem__(self, key):
- self._del_item(key)
-
- # -------------------------------------------------------------------------
- def __iter__(self):
-
- for name in self.keys():
- yield name
-
- # -------------------------------------------------------------------------
- def __len__(self):
- return len(self._map)
-
- # -------------------------------------------------------------------------
- # The next methods aren't required, but nice for different purposes:
- def __str__(self):
- """returns simple dict representation of the mapping"""
- return str(self._map)
-
- # -------------------------------------------------------------------------
- def __repr__(self):
- '''echoes class, id, & reproducible representation in the REPL'''
- return '{}, {}({})'.format(
- super(TerraformDiskDict, self).__repr__(),
- self.__class__.__name__,
- self._map)
-
- # -------------------------------------------------------------------------
- def __contains__(self, key):
-
- if key is None:
- raise TypeError(self.msg_none_type_error)
-
- return key in self._map
-
- # -------------------------------------------------------------------------
- def keys(self):
-
- return sorted(self._map.keys(), key=str.lower)
-
- # -------------------------------------------------------------------------
- def items(self):
-
- item_list = []
-
- for name in self.keys():
- item_list.append((name, self._map[name]))
-
- return item_list
-
- # -------------------------------------------------------------------------
- def values(self):
-
- value_list = []
- for name in self.keys():
- value_list.append(self._map[name])
- return value_list
-
- # -------------------------------------------------------------------------
- def __eq__(self, other):
-
- if not isinstance(other, TerraformDiskDict):
- raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
-
- return self._map == other._map
-
- # -------------------------------------------------------------------------
- def __ne__(self, other):
-
- if not isinstance(other, TerraformDiskDict):
- raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
-
- return self._map != other._map
-
- # -------------------------------------------------------------------------
- def pop(self, key, *args):
-
- if key is None:
- raise TypeError(self.msg_none_type_error)
-
- return self._map.pop(key, *args)
-
- # -------------------------------------------------------------------------
- def popitem(self):
-
- if not len(self._map):
- return None
-
- name = self.keys()[0]
- disk = self._map[name]
- del self._map[name]
- return (name, disk)
-
- # -------------------------------------------------------------------------
- def clear(self):
- self._map = dict()
-
- # -------------------------------------------------------------------------
- def setdefault(self, key, default):
-
- if key is None:
- raise TypeError(self.msg_none_type_error)
-
- if not isinstance(default, TerraformDisk):
- raise TypeError(self.msg_invalid_disk_type.format(default.__class__.__name__))
-
- if key in self._map:
- return self._map[key]
-
- self._set_item(key, default)
- return default
-
- # -------------------------------------------------------------------------
- def update(self, other):
-
- if isinstance(other, TerraformDiskDict) or isinstance(other, dict):
- for name in other.keys():
- self._set_item(name, other[name])
- return
-
- for tokens in other:
- key = tokens[0]
- value = tokens[1]
- self._set_item(key, value)
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
-
- res = {}
- res = super(TerraformDiskDict, self).as_dict(short=short)
- res['map'] = {}
-
- for name in self._map:
- res['map'][name] = self._map[name].as_dict(short)
-
- return res
-
- # -------------------------------------------------------------------------
- def as_list(self, short=True):
-
- res = []
- for name in self.keys():
- res.append(self._map[name].as_dict(short))
- return res
-
- # -------------------------------------------------------------------------
- def __copy__(self):
-
- if self.verbose > 2:
- LOG.debug(_("Copying Terraform disk dictionary ..."))
-
- new = self.__class__(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- initialized=False)
-
- for name in self._map:
- new.append(copy.copy(self._map[name]))
-
- if self.initialized:
- new.initialized = True
-
- return new
-
- # -------------------------------------------------------------------------
- def get_ctrlr_count(self):
-
- if len(self) <= 1:
- return 1
- if len(self) >= TerraformDisk.max_scsi_ctrlrs:
- return TerraformDisk.max_scsi_ctrlrs
- return len(self)
-
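- # A short usage sketch with hypothetical disk names:
- #
- #     disks = TerraformDiskDict()
- #     disks.append(TerraformDisk(name='disk0', root_disk=True, unit_number=0))
- #     disks.append(TerraformDisk(name='disk1', unit_number=1, size_gb=20))
- #     disks['disk1'].size_gb       # -> 20.0
- #     disks.get_ctrlr_count()      # -> 2
-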
-
-# =============================================================================
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: The module for a VM interface intended for Terraform.
-"""
-from __future__ import absolute_import
-
-# Standard modules
-import logging
-import re
-import ipaddress
-
-try:
- from collections.abc import Mapping
-except ImportError:
- from collections import Mapping
-
-# Third party modules
-
-# Own modules
-from fb_tools.common import pp, to_bool, RE_FQDN
-
-from fb_tools.obj import FbBaseObject
-
-from ..errors import TerraformVmDefinitionError
-
-from ..xlate import XLATOR
-
-__version__ = '1.0.1'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class TerraformInterface(FbBaseObject):
- """A class encapsulating a network interface of a VirtualMachine managed by Terraform."""
-
- re_address = re.compile(r'^\s*address\s*$', re.IGNORECASE)
- re_address_v4 = re.compile(r'^\s*address[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
- re_address_v6 = re.compile(r'^\s*address[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
- re_fqdn = re.compile(r'^\s*fqdn\s*$', re.IGNORECASE)
- re_gateway = re.compile(r'^\s*gateway\s*$', re.IGNORECASE)
- re_gateway_v4 = re.compile(r'^\s*gateway[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
- re_gateway_v6 = re.compile(r'^\s*gateway[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
- re_v4_before_v6 = re.compile(
- r'^\s*(?:ip)?v4[_-](?:before|primary[_-]to)[_-](?:ip)?v6\s*$', re.IGNORECASE)
- re_network = re.compile(r'^\s*network\s*$', re.IGNORECASE)
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
- address_v4=None, address_v6=None, fqdn=None, network=None, ipv4_primary=True,
- gateway_v4=None, gateway_v6=None, netmask_v4=None, netmask_v6=None):
-
- self._address_v4 = None
- self._netmask_v4 = None
- self._address_v6 = None
- self._netmask_v6 = None
- self._fqdn = None
- self._network = None
- self._gateway_v4 = None
- self._gateway_v6 = None
- self._ipv4_primary = bool(ipv4_primary)
-
- super(TerraformInterface, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- initialized=False,
- )
-
- if address_v4 is not None:
- self.address_v4 = address_v4
- if address_v6 is not None:
- self.address_v6 = address_v6
- if fqdn is not None:
- self.fqdn = fqdn
- if network is not None:
- self.network = network
- if gateway_v4 is not None:
- self.gateway_v4 = gateway_v4
- if gateway_v6 is not None:
- self.gateway_v6 = gateway_v6
- if netmask_v4 is not None:
- self.netmask_v4 = netmask_v4
- if netmask_v6 is not None:
- self.netmask_v6 = netmask_v6
-
- self.initialized = initialized
-
- # -----------------------------------------------------------
- @property
- def ipv4_primary(self):
- """Is the IPv6 address prior to the IPv6 address, if both are existing?"""
- return self._ipv4_primary
-
- @ipv4_primary.setter
- def ipv4_primary(self, value):
- self._ipv4_primary = bool(value)
-
- # -----------------------------------------------------------
- @property
- def address_v4(self):
- """The IPv4 address of the interface."""
- return self._address_v4
-
- @address_v4.setter
- def address_v4(self, value):
- if value is None:
- self._address_v4 = None
- return
- val = str(value).strip()
- if val == '':
- self._address_v4 = None
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version != 4:
- msg = _("IP address {!r} is not an IPv4 address.").format(addr)
- raise ValueError(msg)
-
- self._address_v4 = addr
-
- # -----------------------------------------------------------
- @property
- def address_v6(self):
- """The IPv6 address of the interface."""
- return self._address_v6
-
- @address_v6.setter
- def address_v6(self, value):
- if value is None:
- self._address_v6 = None
- return
- val = str(value).strip()
- if val == '':
- self._address_v6 = None
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version != 6:
- msg = _("IP address {!r} is not an IPv6 address.").format(addr)
- raise ValueError(msg)
-
- self._address_v6 = addr
-
- # -----------------------------------------------------------
- @property
- def address(self):
- """The IPv4 or IPv6 address of the interface."""
- if self.address_v4 and self.address_v6:
- if self.ipv4_primary:
- return self.address_v4
- else:
- return self.address_v6
- if self.address_v4:
- return self.address_v4
- if self.address_v6:
- return self.address_v6
- return None
-
- @address.setter
- def address(self, value):
- if value is None:
- return
- val = str(value).strip()
- if val == '':
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version == 6:
- self._address_v6 = addr
- else:
- self._address_v4 = addr
-
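- # A minimal usage sketch of the combined address handling (all values below are
- # invented for illustration): with both protocol addresses set and ipv4_primary
- # left at its default of True, the generic address property returns the IPv4
- # address, otherwise the IPv6 address.
- #
- # iface = TerraformInterface(address_v4='192.0.2.10', address_v6='2001:db8::10')
- # iface.address # -> IPv4Address('192.0.2.10')
- # iface.ipv4_primary = False
- # iface.address # -> IPv6Address('2001:db8::10')
-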
- # -----------------------------------------------------------
- @property
- def fqdn(self):
- """The FQDN of the interface address to define."""
- return self._fqdn
-
- @fqdn.setter
- def fqdn(self, value):
- if value is None:
- self._fqdn = None
- return
-
- val = str(value).strip().lower()
- if val == '':
- self._fqdn = None
- return
-
- if not RE_FQDN.search(val):
- msg = _("The hostname {!r} is no a valid FQDN.").format(value)
- raise ValueError(msg)
- self._fqdn = val
-
- # -----------------------------------------------------------
- @property
- def network(self):
- """The name of the VSphere network of the interface."""
- return self._network
-
- @network.setter
- def network(self, value):
- if value is None:
- self._network = None
- return
-
- val = str(value).strip()
- if val == '':
- self._network = None
- return
-
- self._network = val
-
- # -----------------------------------------------------------
- @property
- def gateway_v4(self):
- """The IPv4 gateway of the interface."""
- return self._gateway_v4
-
- @gateway_v4.setter
- def gateway_v4(self, value):
- if value is None:
- self._gateway_v4 = None
- return
- val = str(value).strip()
- if val == '':
- self._gateway_v4 = None
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version != 4:
- msg = _("IP gateway {!r} is not an IPv4 address.").format(addr)
- raise ValueError(msg)
-
- self._gateway_v4 = addr
-
- # -----------------------------------------------------------
- @property
- def gateway_v6(self):
- """The IPv6 gateway of the interface."""
- return self._gateway_v6
-
- @gateway_v6.setter
- def gateway_v6(self, value):
- if value is None:
- self._gateway_v6 = None
- return
- val = str(value).strip()
- if val == '':
- self._gateway_v6 = None
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version != 6:
- msg = _("IP gateway {!r} is not an IPv6 address.").format(addr)
- raise ValueError(msg)
-
- self._gateway_v6 = addr
-
- # -----------------------------------------------------------
- @property
- def netmask_v4(self):
- """The IPv4 netmask of the interface."""
- return self._netmask_v4
-
- @netmask_v4.setter
- def netmask_v4(self, value):
- if value is None:
- self._netmask_v4 = None
- return
- val = int(value)
- if val < 0 or val > 32:
- msg = _("Invalid IPv4 netmask {!r}").format(value)
- raise ValueError(msg)
-
- self._netmask_v4 = val
-
- # -----------------------------------------------------------
- @property
- def netmask_v6(self):
- """The IPv6 netmask of the interface."""
- return self._netmask_v6
-
- @netmask_v6.setter
- def netmask_v6(self, value):
- if value is None:
- self._netmask_v6 = None
- return
- val = int(value)
- if val < 0 or val > 128:
- msg = _("Invalid IPv6 netmask {!r}").format(value)
- raise ValueError(msg)
-
- self._netmask_v6 = val
-
- # -----------------------------------------------------------
- @property
- def gateway(self):
- """The IPv4 or IPv6 gateway of the interface."""
- if self.gateway_v4 and self.gateway_v6:
- if self.ipv4_primary:
- return self.gateway_v4
- else:
- return self.gateway_v6
- if self.gateway_v4:
- return self.gateway_v4
- if self.gateway_v6:
- return self.gateway_v6
- return None
-
- @gateway.setter
- def gateway(self, value):
- if value is None:
- return
- val = str(value).strip()
- if val == '':
- return
-
- addr = ipaddress.ip_address(val)
- if addr.version == 6:
- self._gateway_v6 = addr
- else:
- self._gateway_v4 = addr
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(TerraformInterface, self).as_dict(short=short)
- res['address'] = self.address
- res['address_v4'] = self.address_v4
- res['address_v6'] = self.address_v6
- res['fqdn'] = self.fqdn
- res['gateway'] = self.gateway
- res['gateway_v4'] = self.gateway_v4
- res['gateway_v6'] = self.gateway_v6
- res['ipv4_primary'] = self.ipv4_primary
- res['netmask_v4'] = self.netmask_v4
- res['netmask_v6'] = self.netmask_v6
- res['network'] = self.network
-
- return res
-
- # -------------------------------------------------------------------------
- @classmethod
- def from_def(cls, if_def, appname=None, verbose=0, base_dir=None):
-
- if verbose > 2:
- LOG.debug(
- _("Trying to instantiate terraform interface from data:") + "\n" + pp(if_def))
-
- if not isinstance(if_def, Mapping):
- msg = _("Interface definition is not a dictionary:") + "\n" + pp(if_def)
- raise TerraformVmDefinitionError(msg)
-
- interface = cls(appname=appname, verbose=verbose, base_dir=base_dir)
- interface.initialized = False
-
- for key in sorted(if_def.keys(), key=str.lower):
-
- val = if_def[key]
-
- if verbose > 3:
- LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=val))
-
- if cls.re_address.search(key) and val:
- interface.address = val
- continue
- if cls.re_address_v4.search(key):
- interface.address_v4 = val
- continue
- if cls.re_address_v6.search(key):
- interface.address_v6 = val
- continue
- if cls.re_v4_before_v6.search(key):
- interface.ipv4_primary = to_bool(val)
- continue
- if cls.re_fqdn.search(key):
- interface.fqdn = val
- continue
- if cls.re_network.search(key):
- interface.network = val
- continue
- if cls.re_gateway.search(key) and val:
- interface.gateway = val
- continue
- if cls.re_gateway_v4.search(key):
- interface.gateway_v4 = val
- continue
- if cls.re_gateway_v6.search(key):
- interface.gateway_v6 = val
- continue
-
- interface.initialized = False
- return interface
-
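- # A sketch of the kind of mapping from_def() expects; the key names only need
- # to match the re_* class patterns above, and all concrete values here are
- # made up for illustration:
- #
- # if_def = {
- # 'address_v4': '192.0.2.10',
- # 'gateway_v4': '192.0.2.1',
- # 'fqdn': 'web01.example.com',
- # 'network': 'VLAN-100',
- # }
- # iface = TerraformInterface.from_def(if_def)
-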
- # -------------------------------------------------------------------------
- def __copy__(self):
-
- if self.verbose > 2:
- LOG.debug(_("Copying Terraform interface object with address {}.").format(
- self.address))
-
- interface = self.__class__(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- initialized=self.initialized, address_v4=self.address_v4, address_v6=self.address_v6,
- ipv4_primary=self.ipv4_primary, fqdn=self.fqdn, network=self.network,
- gateway_v4=self.gateway_v4, gateway_v6=self.gateway_v6,
- netmask_v4=self.netmask_v4, netmask_v6=self.netmask_v6,
- )
-
- return interface
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: The module for a VM destined for Terraform
-"""
-from __future__ import absolute_import
-
-# Standard modules
-import logging
-import re
-import copy
-import ipaddress
-
-try:
- from collections.abc import Iterable, Mapping
-except ImportError:
- from collections import Iterable, Mapping
-
-# Third party modules
-
-# Own modules
-from fb_tools.common import pp, to_bool, RE_FQDN, RE_TF_NAME
-from fb_tools.common import human2mbytes, is_sequence
-
-from fb_tools.handling_obj import HandlingObject
-
-from ..errors import TerraformVmDefinitionError
-from ..errors import TerraformVmTooManyDisksError
-
-from ..config import CrTfConfiguration
-
-from ..xlate import XLATOR
-
-from .disk import TerraformDisk, TerraformDiskDict
-
-from .interface import TerraformInterface
-
-__version__ = '1.7.1'
-
-LOG = logging.getLogger(__name__)
-
-PUPPET_TIERS = (
- 'production',
- 'live',
- 'test',
- 'stage',
- 'development',
-)
-
-PUPPET_ENVIRONMENTS = (
- 'production',
- 'test',
- 'development',
-)
-
-DS_TYPES = (
- 'ssd',
- 'sas',
- 'sata',
-)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class TerraformVm(HandlingObject):
- """A class encapsulating a VirtualMachine managed by Terraform."""
-
- default_vsphere = 'live'
-
- default_boot_delay = 5
- default_customer = 'Pixelpark'
- default_ds_type = 'sata'
- default_folder = 'pixelpark'
- default_memory = 1024
- default_nameservers = (
- ipaddress.ip_address('93.188.109.13'),
- ipaddress.ip_address('217.66.52.10'),
- ipaddress.ip_address('212.91.225.75')
- )
- default_searchdomains = ('pixelpark.net', 'pixelpark.com')
- default_dns_options = 'timeout:1 attempts:2'
- default_num_cpus = 1
- default_puppet_contact = '8x5@pixelpark.com'
- default_puppet_customer = 'pixelpark'
- default_puppet_env = 'development'
- default_puppet_tier = 'development'
- default_puppet_role = 'default'
- default_purpose = "Customer project"
- default_rootdisk_size = 20.0
-
- valid_puppet_environments = list(PUPPET_ENVIRONMENTS)
-
- max_num_cpus = 64
- memory_chunk = 256
- max_memory = 512 * 1024
- max_boot_delay = 30
- min_rootdisk_size = CrTfConfiguration.default_root_min_size
- max_rootdisk_size = CrTfConfiguration.default_root_max_size
-
- max_nameservers = 4
- max_searchdomains = 5
-
- re_key_fqdn = re.compile(r'^\s*(?:fqdn|name)\s*$', re.IGNORECASE)
- re_key_vm_folder = re.compile(r'^\s*(?:vm[_-]?)folder\s*$', re.IGNORECASE)
- re_key_boot_delay = re.compile(r'^\s*boot[_-]?delay\s*$', re.IGNORECASE)
- re_key_ds_cluster = re.compile(r'^\s*(?:datastore|ds)[_-]?cluster\s*$', re.IGNORECASE)
- re_key_ds_type = re.compile(r'^\s*(?:datastore|ds)[_-]?type\s*$', re.IGNORECASE)
- re_key_puppet_contact = re.compile(r'^\s*puppet[_-]?contact\s*$', re.IGNORECASE)
- re_key_puppet_customer = re.compile(r'^\s*(?:puppet|hiera)[_-]?customer\s*$', re.IGNORECASE)
- re_key_puppet_project = re.compile(r'^\s*(?:puppet|hiera)[_-]?project\s*$', re.IGNORECASE)
- re_key_puppet_tier = re.compile(r'^\s*puppet[_-]?tier\s*$', re.IGNORECASE)
- re_key_puppet_env = re.compile(r'^\s*puppet[_-]?env(?:ironment)?\s*$', re.IGNORECASE)
- re_key_puppet_role = re.compile(r'^\s*puppet[_-]?role\s*$', re.IGNORECASE)
- re_key_puppet_initial_install = re.compile(
- r'^\s*puppet[_-]?initial[_-]?install\s*$', re.IGNORECASE)
- re_key_env = re.compile(r'^\s*env(?:ironment)?\s*$', re.IGNORECASE)
- re_key_initial_install = re.compile(r'^\s*initial[_-]?install\s*$', re.IGNORECASE)
- re_key_ns = re.compile(r'^\s*nameservers?\s*$', re.IGNORECASE)
- re_key_searchdomain = re.compile(r'^\s*search[_-]*domains?\s*$', re.IGNORECASE)
- re_key_dnsoptions = re.compile(r'^\s*(dns|resolv)[_-]*options?\s*$', re.IGNORECASE)
- re_key_root_disk = re.compile(r'^\s*root[_-]?disk\s*$', re.IGNORECASE)
- re_key_root_disk_size = re.compile(r'^\s*root[_-]?disk[_-]?size\s*$', re.IGNORECASE)
- re_key_data_disk = re.compile(r'^\s*data[_-]?disk\s*$', re.IGNORECASE)
- re_key_data_disks = re.compile(r'^\s*data[_-]?disks\s*$', re.IGNORECASE)
- re_key_interface = re.compile(r'^\s*interfaces?\s*$', re.IGNORECASE)
- re_key_has_backup = re.compile(r'^\s*has[_-]?backup\s*$', re.IGNORECASE)
- re_key_has_puppet = re.compile(r'^\s*has[_-]?puppet\s*$', re.IGNORECASE)
- re_key_is_rhel = re.compile(r'^\s*is[_-]?rhel\s*$', re.IGNORECASE)
- re_memory_value = re.compile(r'^\s*(\d+(?:\.\d*)?)\s*(?:(\D+)\s*)?$')
- re_rhel_template = re.compile(r'(?:rhel|red[\s_-]*hat[\s_-]*enterprise)', re.IGNORECASE)
-
- re_invalid_chars = re.compile(r'[^a-z0-9@\._-]', re.IGNORECASE)
- re_invalid_chars_role = re.compile(r'[^a-z0-9:@\._-]', re.IGNORECASE)
-
- re_disk_size = re.compile(r'^\s*size\s*$', re.IGNORECASE)
- re_disk_mountpoint = re.compile(r'^\s*mount[_-]?point\s*$', re.IGNORECASE)
- re_disk_vgname = re.compile(r'^\s*vg[_-]?name\s*$', re.IGNORECASE)
- re_disk_lvname = re.compile(r'^\s*lv[_-]?name\s*$', re.IGNORECASE)
- re_disk_fstype = re.compile(r'^\s*fs[_-]?type\s*$', re.IGNORECASE)
-
- re_fqdn_dot_at_end = re.compile(r'[^\.]\.$')
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None,
- simulate=False, force=None, terminal_has_colors=False, initialized=False,
- is_template=True, name=None, fqdn=None, folder=None, num_cpus=None, memory=None,
- cluster=None, boot_delay=None, ds_cluster=None, datastore=None, ds_type=None,
- customer=None, rootdisk_size=None, purpose=None, puppet_contact=None, puppet_role=None,
- puppet_customer=None, puppet_project=None, puppet_tier=None, puppet_env=None,
- puppet_initial_install=True, vm_template=None, nameservers=None, searchdomains=None,
- dns_options=None, has_backup=True, has_puppet=True, already_existing=None,
- vsphere=None, is_rhel=None):
-
- self._vsphere = self.default_vsphere
- self._is_template = bool(is_template)
- self._name = None
- self._fqdn = None
- self._cluster = None
- self._folder = self.default_folder
- self._num_cpus = self.default_num_cpus
- self._memory = self.default_memory
- self._boot_delay = self.default_boot_delay
- self._ds_cluster = None
- self._datastore = None
- self._ds_type = self.default_ds_type
- self._customer = self.default_customer
- self._rootdisk_size = self.default_rootdisk_size
- self._purpose = self.default_purpose
- self._puppet_contact = self.default_puppet_contact
- self._puppet_customer = self.default_puppet_customer
- self._puppet_project = None
- self._puppet_tier = self.default_puppet_tier
- self._puppet_env = None
- self._puppet_role = self.default_puppet_role
- self._puppet_initial_install = bool(puppet_initial_install)
- self._vm_template = None
- self._has_backup = bool(has_backup)
- self._has_puppet = bool(has_puppet)
- self._already_existing = False
- self._is_rhel = None
-
- self.disks = None
- self.interfaces = []
-
- self.nameservers = copy.copy(self.default_nameservers)
- self.searchdomains = copy.copy(self.default_searchdomains)
- self.dns_options = copy.copy(self.default_dns_options)
-
- super(TerraformVm, self).__init__(
- appname=appname, verbose=verbose, version=version, base_dir=base_dir,
- simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
- initialized=False,
- )
-
- self._post_init(
- name=name, fqdn=fqdn, num_cpus=num_cpus, memory=memory, folder=folder,
- boot_delay=boot_delay, vm_template=vm_template, puppet_contact=puppet_contact,
- puppet_customer=puppet_customer, puppet_tier=puppet_tier, puppet_env=puppet_env,
- puppet_initial_install=puppet_initial_install, is_rhel=is_rhel,
- cluster=cluster, rootdisk_size=rootdisk_size, nameservers=nameservers,
- searchdomains=searchdomains, dns_options=dns_options, purpose=purpose,
- customer=customer, ds_cluster=ds_cluster, datastore=datastore, ds_type=ds_type,
- already_existing=already_existing, initialized=initialized, puppet_role=puppet_role,
- puppet_project=puppet_project, vsphere=vsphere)
-
- # -------------------------------------------------------------------------
- def _post_init(
- self, name=None, fqdn=None, nameservers=None, searchdomains=None,
- initialized=False, **kwargs):
-
- self.disks = TerraformDiskDict(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
-
- if name and str(name).strip():
- self._name = str(name).strip()
-
- if not self.is_template and fqdn is not None:
- self.fqdn = fqdn
-
- for (key, val) in kwargs.items():
- if val is None:
- continue
- if hasattr(self, str(key)):
- setattr(self, str(key), val)
-
- if nameservers is not None:
- self.nameservers = self._get_ns_list(nameservers)
- if searchdomains is not None:
- self.searchdomains = self._get_searchdomain_list(searchdomains)
-
- if self.is_template:
- if self.fqdn:
- msg = _("A VM template definition may not have a FQDN (found: {!r}).").format(
- self.fqdn)
- raise TerraformVmDefinitionError(msg)
- if not self.name:
- msg = _("A VM template definition must have a name.")
- raise TerraformVmDefinitionError(msg)
- else:
- if not self.fqdn:
- msg = _("A VM definition (no template) must have a FQDN.")
- raise TerraformVmDefinitionError(msg)
-
- self.apply_root_disk()
-
- self.initialized = initialized
-
- # -------------------------------------------------------------------------
- @classmethod
- def from_def(
- cls, vm_def, name=None, is_template=False, template_vm=None, appname=None,
- verbose=0, base_dir=None, simulate=False, force=False,
- terminal_has_colors=False, initialized=False):
-
- if verbose > 2:
- LOG.debug(_("Trying to instantiate VM from data:") + "\n" + pp(vm_def))
-
- if not isinstance(vm_def, Mapping):
- msg = _("VM definition is not a dictionary:") + "\n" + pp(vm_def)
- raise TerraformVmDefinitionError(msg)
-
- if template_vm:
- if not isinstance(template_vm, TerraformVm):
- msg = _("Given parameter {!r} is not a TerraformVm object.").format(template_vm)
- raise TypeError(msg)
- vm = copy.copy(template_vm)
- vm.appname = appname
- vm.verbose = verbose
- vm.base_dir = base_dir
- vm.simulate = simulate
- vm.force = force
- vm.terminal_has_colors = terminal_has_colors
- else:
- vm = cls(
- appname=appname, verbose=verbose, base_dir=base_dir, simulate=simulate,
- force=force, is_template=is_template, name=name,
- terminal_has_colors=terminal_has_colors)
- vm.initialized = False
-
- vm.is_template = is_template
- vm.name = name
-
- for (key, value) in vm_def.items():
- cls._apply_vmdef2vm(
- vm, key, value, verbose=verbose, appname=appname, base_dir=base_dir)
-
- vm.apply_root_disk()
- if vm.interfaces and vm.fqdn and not vm.interfaces[0].fqdn:
- vm.interfaces[0].fqdn = vm.fqdn
-
- if not vm.is_template:
- if vm.is_rhel is None:
- vm.is_rhel = cls.guess_rhel(vm)
-
- vm.initialized = True
- return vm
-
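- # A sketch of a VM definition mapping as consumed by from_def(); the key names
- # follow the re_key_* patterns of this class, and every concrete value below is
- # invented for illustration:
- #
- # vm_def = {
- # 'fqdn': 'web01.example.com',
- # 'num_cpus': 2,
- # 'memory': '4 GiB',
- # 'puppet': {'customer': 'examplecorp', 'role': 'webserver'},
- # 'interfaces': [{'address_v4': '192.0.2.10', 'network': 'VLAN-100'}],
- # 'data_disks': [{'size': 50, 'mountpoint': '/data'}],
- # }
- # vm = TerraformVm.from_def(vm_def, name='web01.example.com')
-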
- # -------------------------------------------------------------------------
- @classmethod
- def _apply_vmdef2vm(cls, vm, key, value, verbose=0, appname=None, base_dir=None):
-
- if verbose > 3:
- LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=value))
-
- if cls._apply_general_vmdef2vm(vm, key, value, verbose):
- return
-
- if key.lower() == 'customer' and value.strip():
- vm.customer = value.strip()
- return
-
- if key.lower() == 'purpose' and value:
- vm.purpose = value.strip()
- return
-
- if key.lower() == 'template' and value:
- vm.vm_template = value
- return
-
- if cls.re_key_has_backup.search(key):
- vm.has_backup = to_bool(value)
- return
-
- if cls._apply_puppet_vmdef2vm(vm, key, value, verbose):
- return
-
- if cls._apply_disk_vmdef2vm(vm, key, value, verbose):
- return
-
- if cls.re_key_ns.search(key):
- if isinstance(value, Iterable):
- ns = cls._get_ns_list(value)
- if ns:
- vm.nameservers = ns
- elif value is None:
- vm.nameservers = []
- else:
- LOG.error(_("Could not evaluate nameservers from {!r}.").format(value))
- return
-
- if cls.re_key_searchdomain.search(key):
- if isinstance(value, Iterable):
- domains = cls._get_searchdomain_list(value)
- if domains:
- vm.searchdomains = domains
- elif value is None:
- vm.searchdomains = []
- else:
- LOG.error(_("Could not evaluate search domains from {!r}.").format(value))
- return
-
- if cls.re_key_dnsoptions.search(key):
- if value is None:
- vm.dns_options = None
- else:
- val = value.strip().lower()
- if val:
- vm.dns_options = val
- else:
- vm.dns_options = None
- return
-
- if cls.re_key_interface.search(key):
- if vm.is_template:
- LOG.error(_("Template definitions may not have interface definitions."))
- return
- if isinstance(value, Iterable):
- for if_def in value:
- interface = TerraformInterface.from_def(
- if_def, appname=appname, verbose=verbose, base_dir=base_dir)
- vm.interfaces.append(interface)
- else:
- LOG.error(_("Could not evaluate interfaces from {!r}.").format(value))
- return
-
- LOG.debug(_("Unknown VM definition key {k!r} with value: {v!r}.").format(
- k=key, v=value))
-
- # -------------------------------------------------------------------------
- @classmethod
- def _apply_general_vmdef2vm(cls, vm, key, value, verbose=0):
-
- if not vm.is_template and cls.re_key_fqdn.search(key):
- vm.fqdn = value
- return True
-
- if key.lower() == 'vsphere' and value:
- if verbose > 2:
- LOG.debug(_("Applying vSphere {!r} to VM.").format(value))
- vm.vsphere = value
- return True
-
- if key.lower() == 'cluster':
- vm.cluster = value
- return True
-
- if key.lower() == 'num_cpus':
- vm.num_cpus = value
- return True
-
- if key.lower() == 'memory':
- vm.memory = value
- return True
-
- if cls.re_key_vm_folder.search(key) and value:
- vm.folder = value
- return True
-
- if cls.re_key_boot_delay.search(key) and value:
- vm.boot_delay = value
- return True
-
- if cls.re_key_ds_cluster.search(key) and value:
- vm.ds_cluster = value
- return True
-
- if key.lower() == 'datastore' and value:
- vm.datastore = value
- return True
-
- if cls.re_key_ds_type.search(key) and value:
- vm.ds_type = value
- return True
-
- if cls.re_key_is_rhel.search(key) and value:
- vm.is_rhel = value
- return True
-
- return False
-
- # -------------------------------------------------------------------------
- @classmethod
- def _apply_disk_vmdef2vm(cls, vm, key, value, verbose=0):
-
- if cls.re_key_root_disk_size.search(key):
- vm.rootdisk_size = value
- return True
-
- max_disks = TerraformDisk.max_scsi_disks
-
- LOG.debug(_("Evaluating disk data of VM {!r} ...").format(vm.name))
-
- if cls.re_key_root_disk.search(key):
- if isinstance(value, Mapping):
- for (p_key, p_val) in value.items():
- if p_key.lower() == 'size':
- vm.rootdisk_size = p_val
- else:
- LOG.error(_(
- "Could not evaluate size of root disk, {!r} is not a dictionary.").format(
- value))
- return True
-
- if cls.re_key_data_disk.search(key):
- unit_number = vm._get_disk_unit(1)
- if isinstance(value, Mapping):
- vm._add_data_disk(value, 'disk1', unit_number)
- elif value is None:
- if unit_number in vm.disks:
- del vm.disks[unit_number]
- else:
- LOG.error(_("Could not evaluate data disk from {!r}.").format(value))
- return True
-
- if cls.re_key_data_disks.search(key):
- if is_sequence(value):
- current_disk = 1
- if len(vm.disks) == 2:
- current_disk = 2
- total_disks = 2 + len(value)
- else:
- total_disks = 1 + len(value)
-
- if total_disks > max_disks:
- raise TerraformVmTooManyDisksError(total_disks, max_disks)
-
- # unit_number = vm._get_disk_unit(current_disk)
-
- for disk_data in value:
- name = "disk{}".format(current_disk)
- unit_number = vm._get_disk_unit(current_disk)
- vm._add_data_disk(disk_data, name, unit_number)
- current_disk += 1
- elif value is None:
- if verbose > 1:
- LOG.debug(_("Data disks for VM {!r} were set to None.").format(vm.name))
- else:
- LOG.error(_("Could not evaluate data disks from {!r}.").format(value))
- return True
-
- LOG.debug(_("The VM {vm!r} has {nrd} disks and {nrc} SCSI controllers.").format(
- vm=vm.name, nrd=len(vm.disks), nrc=vm.disks.get_ctrlr_count()))
-
- return False
-
- # -------------------------------------------------------------------------
- @classmethod
- def _apply_puppet_vmdef2vm(cls, vm, key, value, verbose=0):
-
- if key.lower() == 'puppet' and isinstance(value, Mapping):
- for (p_key, p_value) in value.items():
- cls._apply_puppetsub_vmdef2vm(
- vm=vm, p_key=p_key, p_value=p_value, verbose=verbose)
- return True
-
- if cls.re_key_has_puppet.search(key):
- vm.has_puppet = to_bool(value)
- return True
-
- if not hasattr(value, 'strip'):
- if verbose > 3:
- LOG.debug(_("Key {k!r} has no string value, but a {c!r} instead.").format(
- k=key, c=value.__class__.__name__))
- return False
-
- if isinstance(value, str):
- val_stripped = value.strip()
- else:
- val_stripped = str(value)
-
- if cls.re_key_puppet_contact.search(key) and val_stripped:
- if cls.re_invalid_chars.search(val_stripped):
- LOG.error(_("Invalid contact name {!r}.").format(value))
- else:
- vm.puppet_contact = val_stripped
- return True
-
- if cls.re_key_puppet_customer.search(key) and val_stripped:
- if cls.re_invalid_chars.search(val_stripped):
- LOG.error(_("Invalid puppet customer name {!r}.").format(value))
- else:
- vm.puppet_customer = val_stripped
- return True
-
- if cls.re_key_puppet_project.search(key) and val_stripped:
- if cls.re_invalid_chars.search(val_stripped):
- LOG.error(_("Invalid puppet customer project name {!r}.").format(value))
- else:
- vm.puppet_project = val_stripped
- return True
-
- if cls.re_key_puppet_role.search(key) and val_stripped:
- if cls.re_invalid_chars_role.search(val_stripped):
- LOG.error(_("Invalid puppet role {!r}.").format(value))
- else:
- vm.puppet_role = val_stripped
- return True
-
- if cls.re_key_puppet_initial_install.search(key):
- vm.puppet_initial_install = value
- return True
-
- if cls.re_key_puppet_tier.search(key) and val_stripped:
- if cls.re_invalid_chars.search(val_stripped):
- LOG.error(_("Invalid puppet tier {!r}.").format(value))
- else:
- vm.puppet_tier = val_stripped
- return True
-
- if cls.re_key_puppet_env.search(key) and val_stripped:
- if verbose > 2:
- LOG.debug(_("Setting Puppet environment to {!r}.").format(val_stripped))
- if cls.re_invalid_chars.search(val_stripped):
- LOG.error(_("Invalid puppet environment {!r}.").format(value))
- else:
- vm.puppet_env = val_stripped
- return True
-
- return False
-
- # -------------------------------------------------------------------------
- @classmethod
- def _apply_puppetsub_vmdef2vm(cls, vm, p_key, p_value, verbose=0):
-
- if isinstance(p_value, str):
- p_value_stripped = p_value.strip()
- else:
- p_value_stripped = str(p_value)
- if verbose > 2:
- LOG.debug(_("Evaluating sub key of {d!r}: {k!r} => {v!r}").format(
- d='puppet', k=p_key, v=p_value_stripped))
-
- if p_key.lower() == 'contact' and p_value_stripped:
- if cls.re_invalid_chars.search(p_value_stripped):
- LOG.error(_("Invalid puppet contact name {!r}.").format(p_value))
- else:
- vm.puppet_contact = p_value_stripped
- return
-
- if p_key.lower() == 'customer' and p_value_stripped:
- if cls.re_invalid_chars.search(p_value_stripped):
- LOG.error(_("Invalid puppet customer name {!r}.").format(p_value))
- else:
- vm.puppet_customer = p_value_stripped
- return
-
- if p_key.lower() == 'project' and p_value_stripped:
- if cls.re_invalid_chars.search(p_value_stripped):
- LOG.error(_("Invalid puppet customer project name {!r}.").format(p_value))
- else:
- vm.puppet_project = p_value_stripped
- return
-
- if p_key.lower() == 'role' and p_value_stripped:
- if cls.re_invalid_chars_role.search(p_value_stripped):
- LOG.error(_("Invalid puppet role {!r}.").format(p_value))
- else:
- vm.puppet_role = p_value_stripped
- return
-
- if cls.re_key_initial_install.search(p_key):
- vm.puppet_initial_install = p_value
- return
-
- if p_key.lower() == 'tier' and p_value_stripped:
- if cls.re_invalid_chars.search(p_value_stripped):
- LOG.error(_("Invalid puppet tier {!r}.").format(p_value))
- else:
- vm.puppet_tier = p_value_stripped
- return
-
- if cls.re_key_env.search(p_key) and p_value_stripped:
- if verbose > 2:
- LOG.debug(
- _("Setting Puppet environment to {!r}.").format(p_value_stripped))
- if cls.re_invalid_chars.search(p_value_stripped):
- LOG.error(_("Invalid puppet environment {!r}.").format(p_value))
- else:
- vm.puppet_env = p_value_stripped
-
- return
-
- # -------------------------------------------------------------------------
- def __copy__(self):
-
- if self.verbose > 2:
- n = self.name
- if self.is_template:
- tpl = _('Template')
- if n is None:
- n = tpl
- else:
- n += ' (' + tpl + ')'
- LOG.debug(_("Copying Terraform VM object {!r} ...").format(n))
-
- vm = self.__class__(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- simulate=self.simulate, force=self.force, initialized=self.initialized,
- terminal_has_colors=self.terminal_has_colors, name=self.name,
- is_template=self.is_template, fqdn=self.fqdn, folder=self.folder,
- num_cpus=self.num_cpus, memory=self.memory, boot_delay=self.boot_delay,
- cluster=self.cluster, ds_cluster=self.ds_cluster, datastore=self.datastore,
- ds_type=self.ds_type, customer=self.customer, purpose=self.purpose,
- vm_template=self.vm_template, puppet_contact=self.puppet_contact,
- puppet_customer=self.puppet_customer, puppet_tier=self.puppet_tier,
- puppet_env=self.puppet_env, puppet_role=self.puppet_role, nameservers=self.nameservers,
- searchdomains=self.searchdomains, dns_options=self.dns_options,
- rootdisk_size=self.rootdisk_size, has_backup=self.has_backup,
- has_puppet=self.has_puppet, puppet_project=self.puppet_project,
- puppet_initial_install=self.puppet_initial_install, vsphere=self.vsphere,
- )
-
- vm.disks = copy.copy(self.disks)
-
- vm.interfaces = []
- for interface in self.interfaces:
- vm.interfaces.append(copy.copy(interface))
-
- return vm
-
- # -------------------------------------------------------------------------
- @classmethod
- def _get_ns_list(cls, nameservers):
-
- if not isinstance(nameservers, Iterable):
- raise ValueError(_("Parameter {p} {ns!r} is not iterable.").format(
- p='nameservers', ns=nameservers))
-
- ns = []
- i = 1
- for val in nameservers:
- try:
- address = ipaddress.ip_address(val)
- if i > cls.max_nameservers:
- LOG.warn(_(
- "There are at most {mx} nameservers accepted, {addr} "
- "will not be considered.").format(
- mx=cls.max_nameservers, addr=address))
- elif address not in ns:
- ns.append(address)
- i += 1
- except Exception as e:
- LOG.error(_("Invalid nameserver address {v!r}: {e}").format(
- v=val, e=e))
-
- return ns
-
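- # Example (with invented addresses): duplicates are dropped and at most
- # max_nameservers entries are kept:
- #
- # TerraformVm._get_ns_list(['192.0.2.53', '192.0.2.53', '2001:db8::53'])
- # # -> [IPv4Address('192.0.2.53'), IPv6Address('2001:db8::53')]
-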
- # -------------------------------------------------------------------------
- @classmethod
- def _get_searchdomain_list(cls, searchdomains):
-
- if not isinstance(searchdomains, Iterable):
- raise ValueError(_("Parameter {p} {ns!r} is not iterable.").format(
- p='searchdomains', ns=searchdomains))
-
- domains = []
- i = 1
- for dom in searchdomains:
- if i > cls.max_searchdomains:
- LOG.warn(_(
- "There are at most {mx} search domains accepted, {srv} "
- "will not be considered.").format(
- mx=cls.max_searchdomains, srv=dom))
- elif dom not in domains:
- domains.append(dom)
- i += 1
-
- return domains
-
- # -------------------------------------------------------------------------
- @classmethod
- def guess_rhel(cls, vm):
- """Trying to guess, whether the VM to deploy should be a RHEL instance."""
-
- if not vm.vm_template:
- msg = _(
- "The VM {!r} was no VMware template assigned, assuming the VM "
- "should become a RHEL instance.").format(vm.fqdn)
- LOG.warn(msg)
- return True
-
- ret = False
- if cls.re_rhel_template.search(vm.vm_template):
- ret = True
-
- msg = _("Guessing the VM {fqdn!r} should become a RHEL instance: {ret!r}").format(
- fqdn=vm.fqdn, ret=ret)
- LOG.debug(msg)
-
- return ret
-
- # -----------------------------------------------------------
- @property
- def is_template(self):
- """A flag indicating, that this is a template instance."""
- return self._is_template
-
- @is_template.setter
- def is_template(self, value):
- self._is_template = bool(value)
-
- # -----------------------------------------------------------
- @property
- def puppet_initial_install(self):
- """Set the initial_install flag for Puppet."""
- return self._puppet_initial_install
-
- @puppet_initial_install.setter
- def puppet_initial_install(self, value):
- self._puppet_initial_install = to_bool(value)
-
- # -----------------------------------------------------------
- @property
- def has_backup(self):
- """A flag indicating, that the VM should run the backup client."""
- return self._has_backup
-
- @has_backup.setter
- def has_backup(self, value):
- self._has_backup = bool(value)
-
- # -----------------------------------------------------------
- @property
- def has_puppet(self):
- """A flag indicating, that the VM should ishould be managed by puppet."""
- return self._has_puppet
-
- @has_puppet.setter
- def has_puppet(self, value):
- self._has_puppet = bool(value)
-
- # -----------------------------------------------------------
- @property
- def fqdn(self):
- """The FQDN of the VM to define. May be Non in case of template instances."""
- return self._fqdn
-
- @fqdn.setter
- def fqdn(self, value):
- if value is None:
- self._fqdn = None
- return
-
- val = str(value).strip().lower()
- if val == '':
- self._fqdn = None
- return
-
- if not RE_FQDN.search(val):
- msg = _("The hostname {!r} is no a valid FQDN.").format(value)
- raise ValueError(msg)
-
- if self.re_fqdn_dot_at_end.search(val):
- msg = _("The hostname {!r} may not end with a dot '.'.").format(value)
- raise ValueError(msg)
-
- self._fqdn = val
-
- # -----------------------------------------------------------
- @property
- def name(self):
- """The name of the VM - if it is no template, then the FQDN."""
- if self.is_template:
- return self._name
- return self._fqdn
-
- @name.setter
- def name(self, value):
- if value is None:
- if not self.is_template:
- self._name = None
- return
- msg = _("The name of a template VM may not be None.")
- raise TerraformVmDefinitionError(msg)
-
- val = str(value).strip()
- if val == '':
- if not self.is_template:
- self._name = None
- return
- msg = _("The name of a template VM may not be empty.")
- raise TerraformVmDefinitionError(msg)
-
- self._name = val
-
- # -----------------------------------------------------------
- @property
- def vsphere(self):
- """The name of the VSPhere from configuration, in which
- the VM should be created."""
- return self._vsphere
-
- @vsphere.setter
- def vsphere(self, value):
- if value is None:
- msg = _("The name of the VSPhere may not be None.")
- raise TerraformVmDefinitionError(msg)
-
- val = str(value).strip()
- if val == '':
- msg = _("The name of the VSPhere may not be empty.")
- raise TerraformVmDefinitionError(msg)
-
- self._vsphere = val
-
- # -----------------------------------------------------------
- @property
- def tf_name(self):
- """The name of the VM how used in terraform."""
- if self.name is None:
- return None
- return 'vm_' + RE_TF_NAME.sub('_', self.name.lower())
-
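- # Example (assuming RE_TF_NAME matches characters that are not allowed in
- # Terraform resource names): a VM named 'web01.example.com' would get the
- # Terraform name 'vm_web01_example_com'.
-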
- # -----------------------------------------------------------
- @property
- def hostname(self):
- """The base hostname of the VM (without domain)."""
- if self._fqdn is None:
- return None
- return self._fqdn.split('.')[0]
-
- # -----------------------------------------------------------
- @property
- def domain(self):
- """The domain part of the host FQDN."""
- if self._fqdn is None:
- return None
- return '.'.join(self._fqdn.split('.')[1:])
-
- # -----------------------------------------------------------
- @property
- def num_cpus(self):
- """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
- return self._num_cpus
-
- @num_cpus.setter
- def num_cpus(self, value):
- val = int(value)
- if val < 1 or val > self.max_num_cpus:
- msg = _(
- "Invalid number of CPUs {n} - number must be "
- "{min} <= NUMBER <= {max}.").format(n=val, min=1, max=self.max_num_cpus)
- raise ValueError(msg)
- self._num_cpus = val
-
- # -----------------------------------------------------------
- @property
- def memory(self):
- """Memory of the VM in MiBytes, must be a multiple of 256."""
- return self._memory
-
- @memory.setter
- def memory(self, value):
-
- value = str(value)
- if self.verbose > 2:
- LOG.debug(_("Trying to detect memory from value {!r}.").format(value))
-
- match = self.re_memory_value.search(value)
- if not match:
- raise ValueError(_("Invalid memory {!r}.").format(value))
- val_raw = match.group(1)
- unit = match.group(2)
- if unit:
- val_raw = "{v} {u}".format(v=val_raw, u=unit)
- else:
- val_raw += ' MiB'
-
- val = human2mbytes(val_raw)
- if val < self.memory_chunk or val > self.max_memory:
- msg = _("Invalid memory {m} - memory must be {min} <= MiBytes <= {max}.").format(
- m=val, min=self.memory_chunk, max=self.max_memory)
- raise ValueError(msg)
- modulus = val % self.memory_chunk
- if modulus:
- msg = _("Invalid memory {m}, must be a multipe of {c}.").format(
- m=val, c=self.memory_chunk)
- raise ValueError(msg)
- self._memory = val
-
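- # Rough sketch of accepted memory values (unit parsing is delegated to
- # fb_tools.common.human2mbytes; the results below assume the usual binary units):
- #
- # vm.memory = 4096 # -> 4096 MiB
- # vm.memory = '4 GiB' # -> 4096 MiB
- # vm.memory = '4000' # raises ValueError, not a multiple of 256
-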
- # -----------------------------------------------------------
- @property
- def cluster(self):
- """The name of the computing cluster, where the VM should be instantiated."""
- return self._cluster
-
- @cluster.setter
- def cluster(self, value):
- if value is None:
- msg = _("The name of the computing cluster of the VM may not be None.")
- raise TerraformVmDefinitionError(msg)
-
- val = str(value).strip()
- if val == '':
- msg = _("The name of the computing cluster of the VM may not be empty.")
- raise TerraformVmDefinitionError(msg)
-
- self._cluster = val
-
- # -----------------------------------------------------------
- @property
- def folder(self):
- """The VM folder of the VM in VSphere."""
- return self._folder
-
- @folder.setter
- def folder(self, value):
- if value is None:
- LOG.warn(_("A folder name may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A folder name may not be empty."))
- return
- self._folder = val
-
- # -----------------------------------------------------------
- @property
- def boot_delay(self):
- """Boot delay in seconds of the VM."""
- return self._boot_delay
-
- @boot_delay.setter
- def boot_delay(self, value):
- val = float(value)
- if val < 0 or val > self.max_boot_delay:
- msg = _(
- "Invalid boot delay {b:0.1} - delay must be "
- "{min} <= NUMBER <= {max}.").format(b=val, min=0, max=self.max_boot_delay)
- raise ValueError(msg)
- self._boot_delay = val
-
- # -----------------------------------------------------------
- @property
- def ds_cluster(self):
- """An optional defined datastore cluster."""
- return self._ds_cluster
-
- @ds_cluster.setter
- def ds_cluster(self, value):
- if value is None:
- self._ds_cluster = None
- return
- val = str(value).strip()
- if val == '':
- self._ds_cluster = None
- return
- self._ds_cluster = val
-
- # -----------------------------------------------------------
- @property
- def datastore(self):
- """An optional defined datastore."""
- return self._datastore
-
- @datastore.setter
- def datastore(self, value):
- if value is None:
- self._datastore = None
- return
- val = str(value).strip()
- if val == '':
- self._datastore = None
- return
- self._datastore = val
-
- # -----------------------------------------------------------
- @property
- def ds_type(self):
- """The type of the datastore (SATA,SAS or SSD).
- Used for autoexpolring."""
- return self._ds_type
-
- @ds_type.setter
- def ds_type(self, value):
- if value is None:
- self._ds_type = None
- return
- val = str(value).strip().lower()
- if val == '':
- self._ds_type = None
- return
- if val not in DS_TYPES:
- msg = _("Datastore type {t!r} not allowed, valid datastore types are: {li}").format(
- t=value, li=DS_TYPES)
- raise ValueError(msg)
- self._ds_type = val
-
- # -----------------------------------------------------------
- @property
- def customer(self):
- """The customer of the VM in VSphere."""
- return self._customer
-
- @customer.setter
- def customer(self, value):
- if value is None:
- LOG.warn(_("A customer name may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A customer name may not be empty."))
- return
- self._customer = val
-
- # -----------------------------------------------------------
- @property
- def owner(self):
- """The customer of the VM in VSphere for /etc/motd."""
- return self._customer
-
- # -----------------------------------------------------------
- @property
- def purpose(self):
- """The purpose of the VM in VSphere."""
- return self._purpose
-
- @purpose.setter
- def purpose(self, value):
- if value is None:
- LOG.warn(_("A purpose may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A purpose may not be empty."))
- return
- self._purpose = val
-
- # -----------------------------------------------------------
- @property
- def vm_template(self):
- """The name of the VM or template in VSphere to use as template."""
- return self._vm_template
-
- @vm_template.setter
- def vm_template(self, value):
- if value is None:
- LOG.warn(_("A template VM name may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A template VM name may not be empty."))
- return
- self._vm_template = val
-
- # -----------------------------------------------------------
- @property
- def puppet_contact(self):
- """The name or address of the contact for the VM."""
- return self._puppet_contact
-
- @puppet_contact.setter
- def puppet_contact(self, value):
- if value is None:
- LOG.warn(_("A puppet contact name may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A puppet contact name may not be empty."))
- return
- self._puppet_contact = val
-
- # -----------------------------------------------------------
- @property
- def puppet_customer(self):
- """The name of the puppet hiera customer for the VM."""
- return self._puppet_customer
-
- @puppet_customer.setter
- def puppet_customer(self, value):
- if value is None:
- LOG.warn(_("A puppet hiera customer name may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A puppet hiera customer name may not be empty."))
- return
- if '/' in val:
- LOG.error(_("A puppet hiera customer name may not contain a slash (/) character."))
- return
- self._puppet_customer = val
-
- # -----------------------------------------------------------
- @property
- def puppet_project(self):
- """The name of the puppet customer project for the VM."""
- return self._puppet_project
-
- @puppet_project.setter
- def puppet_project(self, value):
- if value is None:
- LOG.warn(_("A puppet hiera project name should not be None."))
- self._puppet_project = None
- return
-
- val = str(value).strip().lower()
- if val == '':
- LOG.warn(_("A puppet hiera customer project may not be empty."))
- return
- if '/' in val:
- LOG.error(_("A puppet hiera customer project may not contain a slash (/) character."))
- return
- self._puppet_project = val
-
- # -----------------------------------------------------------
- @property
- def hiera_customer(self):
- """The name of the hiera customer for the VM."""
- return self._puppet_customer
-
- @hiera_customer.setter
- def hiera_customer(self, value):
- self.puppet_customer = value
-
- # -----------------------------------------------------------
- @property
- def hiera_project(self):
- """The name of the customer project for the VM."""
- return self._puppet_project
-
- @hiera_project.setter
- def hiera_project(self, value):
- self.puppet_project = value
-
- # -----------------------------------------------------------
- @property
- def puppet_role(self):
- """The name of the puppet role for the VM."""
- return self._puppet_role
-
- @puppet_role.setter
- def puppet_role(self, value):
- if value is None:
- LOG.warn(_("A puppet role may not be None."))
- return
-
- val = str(value).strip()
- if val == '':
- LOG.warn(_("A puppet role may not be empty."))
- return
- self._puppet_role = val
-
- # -----------------------------------------------------------
- @property
- def puppet_tier(self):
- """The name of the puppet tier of the VM."""
- return self._puppet_tier
-
- @puppet_tier.setter
- def puppet_tier(self, value):
- if value is None:
- LOG.warn(_("A puppet tier name may not be None."))
- return
-
- val = str(value).strip().lower()
- if val == '':
- LOG.warn(_("A puppet tier name may not be empty."))
- return
-
- if val not in PUPPET_TIERS:
- LOG.warn(_("A puppet tier should be one of {li} (given: {v!r}).").format(
- li=pp(PUPPET_TIERS), v=value))
-
- self._puppet_tier = val
-
- # -----------------------------------------------------------
- @property
- def puppet_env(self):
- """The name of the puppet environment of the VM."""
- if self._puppet_env is not None:
- return self._puppet_env
- if self.is_template:
- return None
- return self.puppet_tier
-
- @puppet_env.setter
- def puppet_env(self, value):
- if value is None:
- return
-
- val = str(value).strip().lower()
- if val == '':
- self._puppet_env = None
- return
-
- if val not in PUPPET_ENVIRONMENTS:
- LOG.warn(_("A puppet environment should be one of {li} (given: {v!r}).").format(
- li=pp(PUPPET_ENVIRONMENTS), v=value))
-
- self._puppet_env = val
-
- # -----------------------------------------------------------
- @property
- def puppet_environment(self):
- """The name of the puppet environment of the VM."""
- return self.puppet_env
-
- @puppet_environment.setter
- def puppet_environment(self, value):
- self.puppet_env = value
-
- # -----------------------------------------------------------
- @property
- def rootdisk_size(self):
- """Size of the root disk of the VM in GiB."""
- return self._rootdisk_size
-
- @rootdisk_size.setter
- def rootdisk_size(self, value):
- val = float(value)
- msg = _(
- "Invalid root disk size {n} - size must be "
- "{min} <= SIZE <= {max}.").format(
- n=val, min=self.min_rootdisk_size, max=self.max_rootdisk_size)
- if val < self.min_rootdisk_size or val > self.max_rootdisk_size:
- raise ValueError(msg)
- self._rootdisk_size = val
-
- # -----------------------------------------------------------
- @property
- def already_existing(self):
- """The Virtual machine is already existing in VSphere."""
- return self._already_existing
-
- @already_existing.setter
- def already_existing(self, value):
- self._already_existing = to_bool(value)
-
- # -----------------------------------------------------------
- @property
- def is_rhel(self):
- """A flag indicating, that the VM should ishould be managed by puppet."""
- return self._is_rhel
-
- @is_rhel.setter
- def is_rhel(self, value):
- if value is None:
- self._is_rhel = None
- return
- self._is_rhel = bool(value)
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(TerraformVm, self).as_dict(short=short)
- res['already_existing'] = self.already_existing
- res['boot_delay'] = self.boot_delay
- res['cluster'] = self.cluster
- res['customer'] = self.customer
- res['datastore'] = self.datastore
- res['domain'] = self.domain
- res['ds_cluster'] = self.ds_cluster
- res['ds_type'] = self.ds_type
- res['folder'] = self.folder
- res['fqdn'] = self.fqdn
- res['has_backup'] = self.has_backup
- res['has_puppet'] = self.has_puppet
- res['hiera_customer'] = self.hiera_customer
- res['hiera_project'] = self.hiera_project
- res['hostname'] = self.hostname
- res['interfaces'] = []
- res['is_rhel'] = self.is_rhel
- res['is_template'] = self.is_template
- res['memory'] = self.memory
- res['name'] = self.name
- res['num_cpus'] = self.num_cpus
- res['owner'] = self.owner
- res['puppet_contact'] = self.puppet_contact
- res['puppet_customer'] = self.puppet_customer
- res['puppet_project'] = self.puppet_project
- res['puppet_env'] = self.puppet_env
- res['puppet_environment'] = self.puppet_environment
- res['puppet_role'] = self.puppet_role
- res['puppet_tier'] = self.puppet_tier
- res['puppet_initial_install'] = self.puppet_initial_install
- res['purpose'] = self.purpose
- res['rootdisk_size'] = self.rootdisk_size
- res['tf_name'] = self.tf_name
- res['vm_template'] = self.vm_template
- res['vsphere'] = self.vsphere
-
- for interface in self.interfaces:
- res['interfaces'].append(interface.as_dict(short=short))
-
- return res
-
- # -------------------------------------------------------------------------
- def apply_root_disk(self):
-
- if self.verbose > 2:
- LOG.debug(_("Resetting root disk."))
-
- disk_name = 'disk0'
-
- disk = TerraformDisk(
- name=disk_name, root_disk=True, unit_number=0, size_gb=self.rootdisk_size,
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- initialized=True)
-
- self.disks[disk_name] = disk
-
- # -------------------------------------------------------------------------
- def _add_data_disk(self, disk_def, name, unit_number=1):
-
- params = {
- 'name': name,
- 'unit_number': unit_number,
- }
-
- for key in disk_def.keys():
- val = disk_def[key]
- if self.re_disk_size.search(key) and val:
- params['size_gb'] = val
- elif self.re_disk_vgname.search(key):
- params['vg_name'] = val
- elif self.re_disk_lvname.search(key):
- params['lv_name'] = val
- elif self.re_disk_mountpoint.search(key):
- params['mountpoint'] = val
- elif self.re_disk_fstype.search(key) and val:
- params['fs_type'] = val
-
- if self.verbose > 2:
- LOG.debug(_("Using parameters for init data disk:") + "\n" + pp(params))
-
- params['appname'] = self.appname
- params['verbose'] = self.verbose
- params['base_dir'] = self.base_dir
-
- disk = TerraformDisk(**params)
- disk.initialized = True
- if self.verbose > 2:
- LOG.debug(_("Got data disk {!r}:").format(name) + "\n" + pp(disk.as_dict()))
- self.disks[name] = disk
-
- # -------------------------------------------------------------------------
- def _get_disk_unit(self, current_disk_nr=0):
- """Tries to evaluate the disk_unit my the current number in the list."""
- disks_per_ctrlr = TerraformDisk.disks_per_scsi_ctrlr
- max_scsi_ctrlrs = TerraformDisk.max_scsi_ctrlrs
- max_disks = TerraformDisk.max_scsi_disks
-
- if self.verbose > 2:
- LOG.debug(_("Trying to get unit_id of disk number {}.").format(current_disk_nr))
-
- if current_disk_nr >= max_disks:
- raise TerraformVmTooManyDisksError(current_disk_nr + 1, max_disks)
-
- ctrlr_id = current_disk_nr % max_scsi_ctrlrs
- id_offset = current_disk_nr // max_scsi_ctrlrs
- unit_id = ctrlr_id * disks_per_ctrlr + id_offset
-
- if self.verbose > 1:
- LOG.debug(_("Got unit_id {id} for disk number {nr} (controller ID {cid}).").format(
- id=unit_id, nr=current_disk_nr, cid=ctrlr_id))
-
- return unit_id
-
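- # Worked example of the unit numbering above, using hypothetical limits of
- # 15 usable disks per SCSI controller and 4 controllers (the real values live
- # in TerraformDisk): disk number 5 lands on controller 5 % 4 = 1 with an
- # offset of 5 // 4 = 1, giving unit_id 1 * 15 + 1 = 16.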
-
-# =============================================================================
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A helper module for reading passwords from the console
-"""
-from __future__ import absolute_import, print_function
-
-# Standard module
-import logging
-
-HAS_GETCH = False
-try:
- import getch
- HAS_GETCH = True
-except ImportError:
- pass
-
-# Third party modules
-from fb_tools.common import to_str
-
-__version__ = '0.1.0'
-LOG = logging.getLogger(__name__)
-
-
-# =============================================================================
-def password_input_getch(prompt='', fill_char='*', max_len=64):
- p_s = ''
- proxy_string = ' ' * (len(prompt) + max_len)
-
- while True:
-
- # Clear the line, then re-print the prompt with one fill character per typed character.
- print('\r' + proxy_string, end='', flush=True)
- print('\r' + prompt + fill_char * len(p_s), end='', flush=True)
-
- c = getch.getch()
- if c == b'\r' or c == b'\n':
- break
- elif c == b'\x08':
- if len(p_s):
- p_s = p_s[:-1]
- continue
-
- p_s += to_str(c)
- if len(p_s) >= max_len:
- break
-
- print('', flush=True)
- return p_s
-
-
-# =============================================================================
-def password_input(prompt='', fill_char='*', max_len=64):
-
- if HAS_GETCH:
- return password_input_getch(prompt=prompt, fill_char=fill_char, max_len=max_len)
-
- import getpass
-
- return getpass.getpass(prompt=prompt)
-
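- # Usage sketch: password_input() transparently falls back to the standard
- # getpass module when the optional getch package is not installed, so callers
- # do not need to know which backend is in use:
- #
- # pwd = password_input(prompt='vSphere password: ')
-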
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: A module for providing a configuration
-"""
-from __future__ import absolute_import
-
-# Standard module
-import logging
-
-# Third party modules
-from fb_tools.obj import FbBaseObject
-
-from fb_tools.common import is_sequence
-
-# Own modules
-from .errors import CrTfConfigError
-
-from .xlate import XLATOR
-
-__version__ = '1.8.1'
-LOG = logging.getLogger(__name__)
-
-_ = XLATOR.gettext
-ngettext = XLATOR.ngettext
-
-
-# =============================================================================
-class VsphereConfig(FbBaseObject):
- """Class for encapsulation of config data of a connection to a VSPhere center."""
-
- default_host = 'vcs01.ppbrln.internal'
- default_port = 443
- default_dc = 'vmcc'
- default_cluster = 'vmcc-l105-01'
- default_min_root_size_gb = 20.0
- default_guest_id = 'centos8_64Guest'
-
- default_template_name = 'rhel9-template'
-
- # -------------------------------------------------------------------------
- def __init__(
- self, appname=None, verbose=0, version=__version__, base_dir=None, name=None,
- host=None, port=None, user=None, password=None, dc=None, cluster=None,
- template_name=None, min_root_size_gb=None, excluded_ds=None, guest_id=None,
- rhsm_user=None, rhsm_password=None, initialized=False):
-
- self._name = None
- self._host = self.default_host
- self._port = self.default_port
- self._user = None
- self._password = None
- self._dc = self.default_dc
- self._cluster = self.default_cluster
- self._template_name = self.default_template_name
- self._min_root_size_gb = self.default_min_root_size_gb
- self._guest_id = self.default_guest_id
- self.excluded_ds = []
- self.used_templates = []
-
- super(VsphereConfig, self).__init__(
- appname=appname, verbose=verbose, version=version,
- base_dir=base_dir, initialized=False,
- )
-
- if name is not None:
- self.name = name
- if host is not None:
- self.host = host
- if port is not None:
- self.port = port
- if user is not None:
- self.user = user
- if password is not None:
- self.password = password
- if dc is not None:
- self.dc = dc
- if cluster is not None:
- self.cluster = cluster
- if template_name is not None:
- self.template_name = template_name
- if min_root_size_gb is not None:
- self.min_root_size_gb = min_root_size_gb
- if guest_id is not None:
- self.guest_id = guest_id
-
- if excluded_ds:
- if is_sequence(excluded_ds):
- for ds in excluded_ds:
- self.excluded_ds.append(str(ds))
- else:
- self.excluded_ds.append(str(excluded_ds))
-
- if initialized:
- self.initialized = True
-
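- # A minimal instantiation sketch; host name and credentials are invented and
- # only meant to illustrate the keyword interface:
- #
- # vs_cfg = VsphereConfig(
- # name='live', host='vcenter.example.com', user='terraform',
- # password='secret', dc='dc01', initialized=True)
-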
- # -----------------------------------------------------------
- @property
- def name(self):
- """The name of the VSphere."""
- return self._name
-
- @name.setter
- def name(self, value):
- if value is None:
- self._name = None
- return
- val = str(value).strip().lower()
- if val == '':
- self._name = None
- else:
- self._name = val
-
- # -----------------------------------------------------------
- @property
- def host(self):
- """The host name or address of the VSphere server."""
- return self._host
-
- @host.setter
- def host(self, value):
- if value is None:
- self._host = self.default_host
- return
- val = str(value).strip().lower()
- if val == '':
- self._host = None
- else:
- self._host = val
-
- # -----------------------------------------------------------
- @property
- def port(self):
- """The TCP port number, where the API is listening on the VSphere server."""
- return self._port
-
- @port.setter
- def port(self, value):
- if value is None:
- self._port = self.default_port
- return
- val = self.default_port
- try:
- val = int(value)
- if val < 1:
- msg = _("a port may not be less than 1: {}.").format(val)
- raise CrTfConfigError(msg)
- max_val = (2 ** 16) - 1
- if val > max_val:
- msg = _("a port may not be greater than {m}: {v}.").format(
- m=max_val, v=val)
- raise CrTfConfigError(msg)
- except ValueError as e:
- msg = _("Wrong port number {v!r}: {e}").format(v=value, e=e)
- LOG.error(msg)
- else:
- self._port = val
-
- # -----------------------------------------------------------
- @property
- def user(self):
- """The user name to connect to the VSphere server."""
- return self._user
-
- @user.setter
- def user(self, value):
- if value is None:
- self._user = None
- return
- val = str(value).strip()
- if val == '':
- self._user = None
- else:
- self._user = val
-
- # -----------------------------------------------------------
- @property
- def password(self):
- """The password of the VSphere user."""
- return self._password
-
- @password.setter
- def password(self, value):
- if value is None:
- self._password = None
- return
- val = str(value)
- if val == '':
- self._password = None
- else:
- self._password = val
-
- # -----------------------------------------------------------
- @property
- def dc(self):
- """The name of the datacenter in VSphere."""
- return self._dc
-
- @dc.setter
- def dc(self, value):
- if value is None:
- self._dc = self.default_dc
- return
- val = str(value).strip()
- if val == '':
- self._dc = self.default_dc
- else:
- self._dc = val
-
- # -----------------------------------------------------------
- @property
- def cluster(self):
- """The name of the default cluster in VSphere."""
- return self._cluster
-
- @cluster.setter
- def cluster(self, value):
- if value is None:
- self._cluster = self.default_cluster
- return
- val = str(value).strip()
- if val == '':
- self._cluster = self.default_cluster
- else:
- self._cluster = val
-
- # -----------------------------------------------------------
- @property
- def template_name(self):
- """The name of the default cluster in VSphere."""
- return self._template_name
-
- @template_name.setter
- def template_name(self, value):
- if value is None:
- self._template_name = self.default_template_name
- return
- val = str(value).strip().lower()
- if val == '':
- self._template_name = self.default_template_name
- else:
- self._template_name = val
-
- # -----------------------------------------------------------
- @property
- def min_root_size_gb(self):
- """The minimum size of a root disk in GiB."""
- return self._min_root_size_gb
-
- @min_root_size_gb.setter
- def min_root_size_gb(self, value):
- if value is None:
- self._min_root_size_gb = self.default_min_root_size_gb
- return
- val = self.default_min_root_size_gb
- try:
- val = float(value)
- if val < 10:
- msg = _("may not be less than 10: {:0.1f}.").format(val)
- raise CrTfConfigError(msg)
- max_val = 4 * 1024
- if val > max_val:
- msg = _("may not be greater than {m}: {v:0.1f}.").format(
- m=max_val, v=val)
- raise CrTfConfigError(msg)
- except ValueError as e:
- msg = _("Wrong minimum root size in GiB {v!r}: {e}").format(v=value, e=e)
- LOG.error(msg)
- else:
- self._min_root_size_gb = val
-
- # -----------------------------------------------------------
- @property
- def guest_id(self):
- """The Id of the Guest OS in VSphere."""
- return self._guest_id
-
- @guest_id.setter
- def guest_id(self, value):
- if value is None:
- self._guest_id = self.default_guest_id
- return
- val = str(value).strip()
- if val == '':
- self._guest_id = self.default_guest_id
- else:
- self._guest_id = val
-
- # -------------------------------------------------------------------------
- def as_dict(self, short=True, show_secrets=False):
- """
- Transforms the elements of the object into a dict
-
- @param short: don't include local properties in resulting dict.
- @type short: bool
-
- @return: structure as dict
- @rtype: dict
- """
-
- res = super(VsphereConfig, self).as_dict(short=short)
-
- res['name'] = self.name
- res['host'] = self.host
- res['port'] = self.port
- res['user'] = self.user
- res['dc'] = self.dc
- res['cluster'] = self.cluster
- res['template_name'] = self.template_name
- res['min_root_size_gb'] = self.min_root_size_gb
- res['guest_id'] = self.guest_id
-
- if self.password:
- if show_secrets or self.verbose > 4:
- res['password'] = self.password
- else:
- res['password'] = '*******'
- else:
- res['password'] = None
-
- return res
-
- # -------------------------------------------------------------------------
- def __copy__(self):
-
- vsphere = self.__class__(
- appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
- initialized=self.initialized, host=self.host, port=self.port, user=self.user,
- password=self.password, dc=self.dc, cluster=self.cluster,
- template_name=self.template_name, excluded_ds=self.excluded_ds,
- min_root_size_gb=self.min_root_size_gb, guest_id=self.guest_id,
- )
- return vsphere
-
- # -------------------------------------------------------------------------
- def __eq__(self, other):
-
- if self.verbose > 4:
- LOG.debug(_("Comparing {} objects ...").format(self.__class__.__name__))
-
- if not isinstance(other, VsphereConfig):
- return False
-
- if self.name != other.name:
- return False
- if self.host != other.host:
- return False
- if self.port != other.port:
- return False
- if self.user != other.user:
- return False
- if self.password != other.password:
- return False
- if self.dc != other.dc:
- return False
- if self.cluster != other.cluster:
- return False
- if self.template_name != other.template_name:
- return False
- if self.min_root_size_gb != other.min_root_size_gb:
- return False
- if self.guest_id != other.guest_id:
- return False
- if self.excluded_ds != other.excluded_ds:
- return False
-
- return True
-
- # -------------------------------------------------------------------------
- def is_valid(self, raise_on_error=False):
-
- name = '<{}>'.format(_('unknown'))
- if self.name:
- name = self.name
- if self.verbose > 1:
- LOG.debug(_("Checking validity of {o}-object {n!r} ...").format(
- o=self.__class__.__name__, n=name))
-
- error_lst = []
-
- mandatory_attribs = ('name', 'host', 'dc', 'cluster')
- requested_attribs = ('user', 'password')
-
- for attrib in mandatory_attribs:
- cur_val = getattr(self, attrib, None)
- if not cur_val:
- msg = _("Attribute {a!r} of the {o}-object {n!r} is not set.").format(
- a=attrib, o=self.__class__.__name__, n=name)
- error_lst.append(msg)
- if not raise_on_error:
- LOG.error(msg)
-
- if error_lst:
- if raise_on_error:
- nr = len(error_lst)
- msg = ngettext(
- 'Found an error in VSPhere configuration',
- 'Found {} errors in VSPhere configuration', nr)
- msg = msg.format(nr) + '\n * ' + '\n * '.join(error_lst)
- raise CrTfConfigError(msg)
- return False
-
- for attrib in requested_attribs:
- cur_val = getattr(self, attrib, None)
- if not cur_val:
- msg = _(
- "Attribute {a!r} of the {o}-object {n!r} is not set, it "
- "will be requestet during this script and on starting terraform.").format(
- a=attrib, o=self.__class__.__name__, n=name)
- LOG.warn(msg)
-
- return True
-
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- pass
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@author: Frank Brehm
-@contact: frank.brehm@pixelpark.com
-@copyright: © 2024 by Frank Brehm, Berlin
-@summary: The module for i18n.
- It provides translation object, usable from all other
- modules in this package.
-"""
-from __future__ import absolute_import, print_function
-
-# Standard modules
-import logging
-import gettext
-
-from pathlib import Path
-
-# Third party modules
-from babel.support import Translations
-
-DOMAIN = 'create_terraform'
-
-LOG = logging.getLogger(__name__)
-
-__version__ = '1.0.3'
-
-__me__ = Path(__file__).resolve()
-__module_dir__ = __me__.parent
-__lib_dir__ = __module_dir__.parent
-__base_dir__ = __lib_dir__.parent
-LOCALE_DIR = __base_dir__.joinpath('locale')
-if not LOCALE_DIR.is_dir():
- LOCALE_DIR = __module_dir__.joinpath('locale')
- if not LOCALE_DIR.is_dir():
- LOCALE_DIR = None
-
-__mo_file__ = gettext.find(DOMAIN, str(LOCALE_DIR))
-if __mo_file__:
- try:
- with open(__mo_file__, 'rb') as F:
- XLATOR = Translations(F, DOMAIN)
- except FileNotFoundError:
- XLATOR = gettext.NullTranslations()
-else:
- XLATOR = gettext.NullTranslations()
-
-_ = XLATOR.gettext
-
-# =============================================================================
-
-if __name__ == "__main__":
-
- print(_("Module directory: {!r}").format(__module_dir__))
- print(_("Base directory: {!r}").format(__base_dir__))
- print(_("Locale directory: {!r}").format(LOCALE_DIR))
- print(_("Locale domain: {!r}").format(DOMAIN))
- print(_("Found .mo-file: {!r}").format(__mo_file__))
-
-# =============================================================================
-
-# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+__version__ = '1.8.10'
+
+MIN_VERSION_TERRAFORM = '1.6.5'
+MAX_VERSION_TERRAFORM = '1.9.0'
+
+MIN_VERSION_VSPHERE_PROVIDER = '2.5.1'
+
+CFGFILE_BASENAME = 'create-terraform.ini'
+
+# vim: ts=4 et list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: The module for the application object.
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import sys
+import os
+import logging
+import re
+import argparse
+import signal
+
+from pathlib import Path
+
+# Third party modules
+from fb_tools.common import pp
+from fb_tools.app import BaseApplication
+from fb_tools.errors import ExpectedHandlerError, CommandNotFoundError
+from fb_tools.config import ConfigError
+from fb_tools.common import generate_password
+
+# Own modules
+from . import __version__ as __pkg_version__
+from . import CFGFILE_BASENAME
+
+from .errors import TerraformHandlerError
+
+from .config import CrTfConfiguration
+
+from .handler import CreateTerraformHandler
+
+from .terraform.vm import TerraformVm
+
+from .xlate import __module_dir__ as __xlate_module_dir__
+from .xlate import __base_dir__ as __xlate_base_dir__
+from .xlate import __mo_file__ as __xlate_mo_file__
+from .xlate import XLATOR, LOCALE_DIR, DOMAIN
+
+__version__ = '1.3.3'
+LOG = logging.getLogger(__name__)
+
+SIGNAL_NAMES = {
+ signal.SIGHUP: 'HUP',
+ signal.SIGINT: 'INT',
+ signal.SIGABRT: 'ABRT',
+ signal.SIGTERM: 'TERM',
+ signal.SIGKILL: 'KILL',
+ signal.SIGQUIT: 'QUIT',
+ signal.SIGUSR1: 'USR1',
+ signal.SIGUSR2: 'USR2',
+}
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CfgFileOptionAction(argparse.Action):
+
+ # -------------------------------------------------------------------------
+ def __init__(self, option_strings, *args, **kwargs):
+
+ super(CfgFileOptionAction, self).__init__(
+ option_strings=option_strings, *args, **kwargs)
+
+ # -------------------------------------------------------------------------
+ def __call__(self, parser, namespace, values, option_string=None):
+
+ if values is None:
+ setattr(namespace, self.dest, None)
+ return
+
+ path = Path(values)
+ if not path.exists():
+ msg = _("File {!r} does not exists.").format(values)
+ raise argparse.ArgumentError(self, msg)
+ if not path.is_file():
+ msg = _("File {!r} is not a regular file.").format(values)
+ raise argparse.ArgumentError(self, msg)
+
+ setattr(namespace, self.dest, path.resolve())
+
+
+# =============================================================================
+class YamlFileOptionAction(argparse.Action):
+
+ # -------------------------------------------------------------------------
+ def __init__(self, option_strings, *args, **kwargs):
+
+ super(YamlFileOptionAction, self).__init__(
+ option_strings=option_strings, *args, **kwargs)
+
+ # -------------------------------------------------------------------------
+ def __call__(self, parser, namespace, values, option_string=None):
+
+ yaml_file_paths = []
+
+ for value in values:
+ path = Path(value)
+ if not path.exists():
+ msg = _("File {!r} does not exists.").format(values)
+ raise argparse.ArgumentError(self, msg)
+ if not path.is_file():
+ msg = _("File {!r} is not a regular file.").format(values)
+ raise argparse.ArgumentError(self, msg)
+ yaml_file_paths.append(path.resolve())
+
+ setattr(namespace, self.dest, yaml_file_paths)
+
+
+# =============================================================================
+class StopStepOptionAction(argparse.Action):
+
+ # -------------------------------------------------------------------------
+ def __init__(self, option_strings, *args, **kwargs):
+
+ super(StopStepOptionAction, self).__init__(
+ option_strings=option_strings, *args, **kwargs)
+
+ # -------------------------------------------------------------------------
+ def __call__(self, parser, namespace, values, option_string=None):
+
+ step = values
+ if step == '?':
+ width = 1
+ for step in CreateTerraformHandler.steps:
+ if len(step) > width:
+ width = len(step)
+
+ print("\n" + _("The following steps to interrupt the execution after are available:"))
+
+ for step in CreateTerraformHandler.steps:
+ desc = _('<no description>')
+ if step in CreateTerraformHandler.step_desc:
+ desc = CreateTerraformHandler.step_desc[step]
+ line = ' * {step:<{width}} {desc}'.format(
+ step=step, width=width, desc=desc)
+ print(line)
+
+ print()
+ sys.exit(0)
+
+ setattr(namespace, self.dest, step)
+
+
+# =============================================================================
+class CrTfApplication(BaseApplication):
+ """
+ Class for the application objects.
+ """
+
+ show_simulate_option = True
+
+ re_prefix = re.compile(r'^[a-z0-9][a-z0-9_]*$', re.IGNORECASE)
+ re_anum = re.compile(r'[^A-Z0-9_]+', re.IGNORECASE)
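+    # Random placeholder used instead of a real VM root password as long as
+    # the root password is not actually needed (see the commented-out block
+    # in post_init() below).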
+ fake_root_passwd = generate_password(12)
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__pkg_version__, base_dir=None,
+ terminal_has_colors=False, initialized=False, usage=None, description=None,
+ argparse_epilog=None, argparse_prefix_chars='-', env_prefix=None):
+
+ self.yaml_file = None
+ self.config = None
+ self.handler = None
+ self._cfg_dir = None
+ self._cfg_file = None
+
+ desc = _(
+ "Creates or updates a directory with a terraform environment "
+ "on base of a given YAML file.")
+
+ super(CrTfApplication, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ description=desc, terminal_has_colors=terminal_has_colors, initialized=False,
+ )
+
+ # -------------------------------------------------------------------------
+ @property
+ def cfg_dir(self):
+ """Directory of the configuration file."""
+ return self._cfg_dir
+
+ # -------------------------------------------------------------------------
+ @property
+ def cfg_file(self):
+ """Configuration file."""
+ return self._cfg_file
+
+ # -------------------------------------------------------------------------
+ def _search_cfg_file(self):
+
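+        # Look for create-terraform.ini in <base_dir>/.., <base_dir>/../etc
+        # and <base_dir>/etc, in this order. If nothing is found, fall back
+        # to <base_dir>/etc as config directory and to the (not existing)
+        # file <base_dir>/../create-terraform.ini.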
+ search_dirs = []
+ search_dirs.append(self.base_dir.parent)
+ search_dirs.append(self.base_dir.parent / 'etc')
+ search_dirs.append(self.base_dir / 'etc')
+
+ for sdir in search_dirs:
+ cfg_file = sdir / CFGFILE_BASENAME
+ LOG.debug(_("Searching for config file {!r} ...").format(str(cfg_file)))
+ if cfg_file.exists() and cfg_file.is_file():
+ self._cfg_dir = sdir
+ self._cfg_file = cfg_file
+ return
+ self._cfg_dir = self.base_dir / 'etc'
+ self._cfg_file = self.base_dir.parent / CFGFILE_BASENAME
+
+ # -------------------------------------------------------------------------
+ def _warn_about_missing_cfg(self):
+
+ cur_dir = Path.cwd()
+
+ default_conf_file = self.cfg_dir / (CFGFILE_BASENAME + '.default')
+ default_cfg_file_rel = os.path.relpath(str(default_conf_file), str(cur_dir))
+
+ cfg1 = self.base_dir.parent / CFGFILE_BASENAME
+ cfg1 = os.path.relpath(str(cfg1), str(cur_dir))
+
+ cfg2 = self.base_dir.parent / 'etc' / CFGFILE_BASENAME
+ cfg2 = os.path.relpath(str(cfg2), str(cur_dir))
+
+ cfg3 = self.cfg_dir / CFGFILE_BASENAME
+ cfg3 = os.path.relpath(str(cfg3), str(cur_dir))
+
+ # cfg_file_rel = os.path.relpath(str(self.cfg_file), str(cur_dir))
+ msg = (_(
+ "Config file {f!r} not found, using defaults.\n"
+ "To avoid this message, you may copy {d!r} to {c1!r}, {c2!r} or {c3!r} "
+ "and fill out all necessary entries, e.g. the passwords and API keys.").format(
+ f=CFGFILE_BASENAME, d=default_cfg_file_rel, c1=cfg1, c2=cfg2, c3=cfg3))
+        LOG.warning(msg)
+
+ # -------------------------------------------------------------------------
+ def _read_cfg(self):
+
+ if self.cfg_file.exists():
+ try:
+ self.config.read()
+ except ConfigError as e:
+ LOG.error(_("Error in configuration:") + " " + str(e))
+ self.exit(1)
+ if self.verbose > 3:
+ LOG.debug(_("Read configuration:") + '\n' + pp(self.config.as_dict()))
+
+ # -------------------------------------------------------------------------
+ def post_init(self):
+ """
+        Execute some actions before calling run().
+
+        This is the place for finishing actions after reading the command
+        line parameters, the configuration and so on.
+
+        This method may be overridden by descendant classes; those methods
+        should always include a call to post_init() of the parent class.
+ """
+
+ self.initialized = False
+
+ self.init_logging()
+
+ self._search_cfg_file()
+
+ self.perform_arg_parser()
+
+ if not self.cfg_file.exists():
+ self._warn_about_missing_cfg()
+
+ self.config = CrTfConfiguration(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ config_file=self.cfg_file)
+
+ self.config.init_vsphere_defaults()
+
+ self._read_cfg()
+
+ if self.config.verbose > self.verbose:
+ self.verbose = self.config.verbose
+ if self.config.simulate:
+ self.simulate = True
+
+ self.config.initialized = True
+
+ if self.config.puppet_envs_delete:
+ LOG.debug(_("Removing allowed puppet environments ..."))
+ for env in self.config.puppet_envs_delete:
+ if env in TerraformVm.valid_puppet_environments:
+ if self.verbose > 1:
+ LOG.debug(_("Removing puppet environment {!r} ...").format(env))
+ TerraformVm.valid_puppet_environments.remove(env)
+
+ if self.config.puppet_envs_add:
+ LOG.debug(_("Adding allowed puppet environments ..."))
+ for env in self.config.puppet_envs_add:
+ if env not in TerraformVm.valid_puppet_environments:
+ if self.verbose > 1:
+ LOG.debug(_("Adding puppet environment {!r} ...").format(env))
+ TerraformVm.valid_puppet_environments.append(env)
+
+ TerraformVm.valid_puppet_environments.sort()
+ LOG.debug(
+ _("Allowed puppet environments:") + ' ' + pp(TerraformVm.valid_puppet_environments))
+
+ self.perform_arg_parser_rest()
+
+ if not self.config.pdns_api_key:
+ url = 'http'
+ if self.config.pdns_api_use_https:
+ url += 's'
+ url += '://' + self.config.pdns_master_server
+ url += ':{}'.format(self.config.pdns_api_port)
+ if self.config.pdns_api_path_prefix:
+ url += self.config.pdns_api_path_prefix
+ prompt = self.colored(_('PowerDNS API key for {!r}').format(url), 'AQUA')
+ print('')
+ self.config.pdns_api_key = self.get_secret(
+ prompt=prompt, item_name=self.colored(_('PowerDNS API key'), 'AQUA'))
+ print('')
+
+# if not self.config.vm_root_password:
+# # Using faked root password, because it is currently not used.
+# # TODO: When the root password is used, then substitute fake password
+# # by prompting for the real root password.
+# LOG.debug(_(
+# "Using faked root password {!r} - "
+# "but this is currently not used.").format(self.fake_root_passwd))
+# self.config.vm_root_password = self.fake_root_passwd
+
+ self.handler = CreateTerraformHandler(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ simulate=self.simulate, force=self.force, config=self.config,
+ terminal_has_colors=self.terminal_has_colors)
+
+ if self.args.stop_after:
+ self.handler.stop_at_step = self.args.stop_after
+
+ self.handler.set_tz(self.config.tz_name)
+
+ try:
+ self.handler.init_handlers()
+ except (CommandNotFoundError, ExpectedHandlerError) as e:
+ LOG.error(str(e))
+ self.exit(5)
+ self.handler.initialized = True
+
+ self.initialized = True
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(CrTfApplication, self).as_dict(short=short)
+
+ res['cfg_dir'] = self.cfg_dir
+ res['cfg_file'] = self.cfg_file
+ res['__pkg_version__'] = __pkg_version__
+ res['config'] = None
+ if self.config:
+ res['config'] = self.config.as_dict(short=short, show_secrets=self.force)
+
+ if 'xlate' not in res:
+ res['xlate'] = {}
+ res['xlate'][DOMAIN] = {
+ '__module_dir__': __xlate_module_dir__,
+ '__base_dir__': __xlate_base_dir__,
+ 'LOCALE_DIR': LOCALE_DIR,
+ 'DOMAIN': DOMAIN,
+ '__mo_file__': __xlate_mo_file__,
+ }
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def init_arg_parser(self):
+ """
+ Public available method to initiate the argument parser.
+ """
+
+ super(CrTfApplication, self).init_arg_parser()
+
+ cur_dir = Path(os.getcwd())
+
+ default_cfg_file = self.base_dir.joinpath('etc').joinpath(self.appname + '.ini')
+ default_cfg_file_rel = Path(os.path.relpath(str(default_cfg_file), str(cur_dir)))
+
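+        # The additional choice '?' is handled by StopStepOptionAction, which
+        # prints the list of available steps and exits instead of storing a value.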
+ steps = list(CreateTerraformHandler.steps[:]) + ['?']
+
+ self.arg_parser.add_argument(
+ '-S', '--stop-after', metavar=_('STEP'), dest='stop_after', choices=steps,
+ action=StopStepOptionAction,
+ help=_(
+ "Name of the step, where to interrupt the execution of this script. "
+ "Use {!r} to show a list of all avaliable steps.").format('--stop-after ?')
+ )
+
+ self.arg_parser.add_argument(
+ '-c', '--config', '--config-file', dest='cfg_file', metavar=_('FILE'),
+ action=CfgFileOptionAction,
+ help=_("Configuration file (default: {!r})").format(str(default_cfg_file_rel))
+ )
+
+ # PowerDNS options
+ pdns_group = self.arg_parser.add_argument_group(_('PowerDNS options'))
+
+ pdns_group.add_argument(
+ '--no-pdns', action="store_true", dest='no_pdns',
+ help=_(
+ "Don't execute any PowerDNS checks or actions. In this case it's on yours "
+ "to ensure existence of all necessary IP addresses.")
+ )
+
+ pdns_group.add_argument(
+ '-M', '--pdns-master', metavar=_("HOST"), dest='pdns_master',
+ help=_(
+ "The hostname or address of the PowerDNS master server "
+ "(Default: {!r}).").format(CrTfConfiguration.default_pdns_master_server)
+ )
+
+ pdns_group.add_argument(
+ '--api-port', metavar=_("PORT"), type=int, dest="pdns_api_port",
+ help=_("The port number of the PowerDNS API (Default: {}).").format(
+ CrTfConfiguration.default_pdns_api_port)
+ )
+
+ pdns_group.add_argument(
+ '--api-key', metavar=_("KEY"), dest="pdns_api_key",
+ help=_("The key accessing to the PDNS API.")
+ )
+
+ pdns_group.add_argument(
+ '--api-https', action="store_true", dest="pdns_api_https",
+ help=_("Should PDNS API requests executed per HTTPS?"),
+ )
+
+ pdns_group.add_argument(
+ '--api-prefix', metavar=_("PATH"), dest='pdns_api_prefix',
+ help=_("The path prefix in the URL for PDNS API requests (Default: {!r}).").format(
+ CrTfConfiguration.default_pdns_api_path_prefix)
+ )
+
+ # Positional arguments
+ self.arg_parser.add_argument(
+ "yaml_file", nargs=1, metavar=_("YAML_FILE"), action=YamlFileOptionAction,
+ help=_("The YAML-file with the definition of the VMs to create with terraform."),
+ )
+
+ # -------------------------------------------------------------------------
+ def perform_arg_parser(self):
+
+ if self.args.cfg_file:
+ self._cfg_file = Path(self.args.cfg_file)
+ if not self.cfg_file.is_absolute():
+ self._cfg_file = self.cfg_file.resolve()
+
+ # -------------------------------------------------------------------------
+ def perform_arg_parser_rest(self):
+ """
+        Publicly available method to execute some actions after parsing
+        the command line parameters.
+ """
+
+ self.perform_arg_parser_pdns()
+
+ self.yaml_file = Path(self.args.yaml_file[0])
+ if not self.yaml_file.is_absolute():
+ self.yaml_file = self.yaml_file.resolve()
+
+ # -------------------------------------------------------------------------
+ def perform_arg_parser_pdns(self):
+
+ if self.args.no_pdns:
+ self.config.no_pdns = True
+ if self.args.pdns_master:
+ self.config.pdns_master_server = self.args.pdns_master
+ if self.args.pdns_api_port:
+ self.config.pdns_api_port = self.args.pdns_api_port
+ if self.args.pdns_api_key:
+ self.config.pdns_api_key = self.args.pdns_api_key
+ if self.args.pdns_api_https:
+ self.config.pdns_api_use_https = True
+ if self.args.pdns_api_prefix:
+ self.config.pdns_api_path_prefix = self.args.pdns_api_prefix
+
+ # -------------------------------------------------------------------------
+ def _run(self):
+ """Main routine."""
+
+ LOG.info(_("Starting {a!r}, version {v!r} ...").format(
+ a=self.appname, v=__pkg_version__))
+
+ try:
+ if self.handler.first_call(self.yaml_file):
+ self.verify_vsphere_credentials()
+ self.handler(self.yaml_file)
+ except ExpectedHandlerError as e:
+ self.handler = None
+ self.handle_error(str(e), _("Create Terraform environment"))
+ self.exit(5)
+
+ # -------------------------------------------------------------------------
+ def verify_vsphere_credentials(self):
+
+ if not self.handler:
+ raise TerraformHandlerError(_("No handler object available."))
+
+ need_nl = False
+
+ if not self.handler.vsphere_user:
+
+ need_nl = True
+ msg = '\n' + _("Please input the {}.").format(self.colored(
+ _('vSphere user name'), 'AQUA'))
+ print(msg)
+ self.handler.vsphere_user = input(self.colored(_('Name'), 'AQUA') + ': ')
+ if not self.handler.vsphere_user:
+ msg = _("No {} given.").format(_('vSphere user name'))
+ raise ExpectedHandlerError(msg)
+
+ for vname in self.handler.vsphere.keys():
+ LOG.debug(_("Setting user for vSphere {vs!r} to {usr!r}.").format(
+ vs=vname, usr=self.handler.vsphere_user))
+ # Dirty, but else a change of fb_tools would be necessary (later)
+ self.handler.vsphere[vname]._user = self.handler.vsphere_user
+ print('')
+ need_nl = False
+
+ if not self.handler.vsphere_password:
+
+ # Get the name of the first (and hopefully only) VSphere
+ vname = None
+ for vn in self.handler.vsphere.keys():
+ vname = vn
+ break
+
+ if need_nl:
+ print('')
+ prompt = self.colored(_("User password of {!r}").format(
+ self.handler.vsphere_user), 'AQUA')
+ item = _('Password for user {u!r} of vSphere {n} on {h!r}').format(
+ u=self.handler.vsphere_user, n=vname, h=self.config.vsphere[vname].host)
+ item = self.colored(item, 'AQUA')
+ self.handler.vsphere_password = self.get_secret(prompt=prompt, item_name=item)
+ if not self.handler.vsphere_password:
+ msg = _("No {} given.").format(_('password of vSphere user'))
+ raise ExpectedHandlerError(msg)
+
+ for vname in self.handler.vsphere.keys():
+ LOG.debug(_("Setting passwort of vSphere {vs!r} user {usr!r}.").format(
+ vs=vname, usr=self.handler.vsphere_user))
+ # Dirty, but else a change of fb_tools would be necessary (later)
+ self.handler.vsphere[vname]._password = self.handler.vsphere_password
+ print('')
+ need_nl = False
+
+ if need_nl:
+ print('')
+
+ # -------------------------------------------------------------------------
+ def post_run(self):
+ """
+        Hook method executed after the main routine.
+        May be overridden by descendant classes.
+
+ """
+
+ if self.verbose > 1:
+ LOG.info(_("Executing {} ...").format('post_run()'))
+
+ if self.handler:
+ self.handler = None
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A module for providing a configuration
+"""
+from __future__ import absolute_import
+
+# Standard module
+import logging
+import re
+
+# Third party modules
+import pytz
+
+from fb_tools.config import BaseConfiguration
+
+from fb_tools.common import to_bool, RE_FQDN, pp
+
+from fb_pdnstools import DEFAULT_PORT as DEFAULT_PDNS_API_PORT
+from fb_pdnstools import DEFAULT_TIMEOUT as DEFAULT_PDNS_API_TIMEOUT
+from fb_pdnstools import DEFAULT_API_PREFIX as DEFAULT_PDNS_API_PREFIX
+from fb_pdnstools import DEFAULT_USE_HTTPS as DEFAULT_PDNS_API_USE_HTTPS
+
+# Own modules
+
+from .errors import CrTfConfigError
+
+from .vs_config import VsphereConfig
+
+from .xlate import XLATOR
+
+__version__ = '1.9.0'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfConfiguration(BaseConfiguration):
+ """
+ A class for providing a configuration for the CrTfApplication class
+ and methods to read it from configuration files.
+ """
+
+ default_pdns_master_server = 'master.pp-dns.com'
+ default_pdns_api_port = DEFAULT_PDNS_API_PORT
+ default_pdns_api_use_https = bool(DEFAULT_PDNS_API_USE_HTTPS)
+ default_pdns_api_path_prefix = DEFAULT_PDNS_API_PREFIX
+    default_pdns_api_timeout = DEFAULT_PDNS_API_TIMEOUT
+ default_pdns_comment_account = 'provisioning'
+
+ default_rhsm_user = 'dpx-subscriber'
+
+ default_vsphere_defs = {
+ 'live': {
+ 'host': 'vcs01.ppbrln.internal',
+ 'port': 443,
+ 'dc': 'vmcc',
+ 'cluster': 'vmcc-l105-01',
+ },
+ 'test': {
+ 'host': 'test-vcsa01.pixelpark.net',
+ 'port': 443,
+ 'dc': 'test-vmcc',
+ 'cluster': 'test-vmcc-l105-01',
+ },
+ }
+
+ default_vsphere_tag_cat_os_id = 'OS'
+ default_vsphere_tag_cat_os_name = 'OS'
+ default_vsphere_tag_cat_os_desc = 'The operating system of a VM.'
+
+ default_vsphere_tag_os_rhel_id = 'os_rhel'
+ default_vsphere_tag_os_rhel_name = 'rhel'
+ default_vsphere_tag_os_rhel_desc = 'RedHat Enterprise Linux'
+
+ default_min_root_size_gb = 32.0
+ default_tz_name = 'Europe/Berlin'
+ default_guest_id = "oracleLinux7_64Guest"
+
+ default_disk_size = 10.0
+ default_root_min_size = 20.0
+ default_root_max_size = 512.0
+ default_disk_min_size = 4.0
+ default_disk_max_size = 1024.0
+
+ default_tf_backend_host = 'terraform.pixelpark.com'
+ default_tf_backend_scheme = 'https'
+ default_tf_backend_path_prefix = 'terraform'
+
+ re_list_split = re.compile(r'\s*[,:\s]+\s*')
+
+ default_puppetmaster = 'puppetmaster03.pixelpark.com'
+ default_puppetca = 'puppetca01.pixelpark.com'
+
+ msg_invalid_type = _("Invalid value {v!r} for {n!r} configuration ({f!r}:[{s}]): {e}")
+ msg_val_negative = _(
+ "Invalid value {v} for {n!r} configuration ({f!r}:[{s}]): "
+ "must be equal or greater than zero.")
+
+ max_pdns_api_timeout = 3600
+
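+    # Regular expressions recognizing alternative spellings of vSphere
+    # section keys, e.g. 'excluded_datastores' / 'exclude-datastore',
+    # 'template' / 'template_name', 'min_root_size_gb' and 'guest_id'.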
+ re_excl_ds = re.compile(r'^\s*excluded?[-_]datastores?\s*$', re.IGNORECASE)
+ re_split_ds = re.compile(r'[,;\s]+')
+ re_template = re.compile(r'^\s*template(?:[-_\.]?name)?\s*$', re.IGNORECASE)
+ re_min_root_size = re.compile(
+ r'^\s*min[-_\.]?root[-_\.]?size(?:[-_\.]?gb)\s*$', re.IGNORECASE)
+ re_guest_id = re.compile(r'^\s*guest[-_]?id\s*$', re.IGNORECASE)
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None, simulate=False,
+ encoding=None, config_dir=None, config_file=None, initialized=False):
+
+ self.pdns_master_server = self.default_pdns_master_server
+ self.pdns_api_port = self.default_pdns_api_port
+ self._pdns_api_key = None
+ self._pdns_api_use_https = self.default_pdns_api_use_https
+ self._pdns_api_timeout = self.default_pdns_api_timeout
+ self.pdns_api_path_prefix = self.default_pdns_api_path_prefix
+ self.min_root_size_gb = self.default_min_root_size_gb
+ self._vm_root_password = None
+ self.tz_name = self.default_tz_name
+ self.guest_id = self.default_guest_id
+ self.puppetmaster = self.default_puppetmaster
+ self.puppetca = self.default_puppetca
+ self.pdns_comment_account = self.default_pdns_comment_account
+ self._rhsm_user = self.default_rhsm_user
+ self._rhsm_password = None
+
+ self._vsphere_tag_cat_os_id = self.default_vsphere_tag_cat_os_id
+ self._vsphere_tag_cat_os_name = self.default_vsphere_tag_cat_os_name
+ self._vsphere_tag_cat_os_desc = self.default_vsphere_tag_cat_os_desc
+
+ self._vsphere_tag_os_rhel_id = self.default_vsphere_tag_os_rhel_id
+ self._vsphere_tag_os_rhel_name = self.default_vsphere_tag_os_rhel_name
+ self._vsphere_tag_os_rhel_desc = self.default_vsphere_tag_os_rhel_desc
+
+ self._no_pdns = False
+
+ self.puppet_envs_add = set()
+ self.puppet_envs_delete = set()
+
+ self.vsphere = {}
+
+ self._disk_size = self.default_disk_size
+
+ self._root_min_size = self.default_root_min_size
+ self._root_max_size = self.default_root_max_size
+ self._disk_min_size = self.default_disk_min_size
+ self._disk_max_size = self.default_disk_max_size
+
+ self.tf_backend_host = self.default_tf_backend_host
+ self.tf_backend_scheme = self.default_tf_backend_scheme
+ self.tf_backend_path_prefix = self.default_tf_backend_path_prefix
+
+ self._simulate = False
+
+ self.excluded_datastores = []
+
+ super(CrTfConfiguration, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ encoding=encoding, config_dir=config_dir, config_file=config_file, initialized=False,
+ )
+
+ self.simulate = simulate
+
+ if initialized:
+ self.initialized = True
+
+ # -----------------------------------------------------------
+ @property
+ def simulate(self):
+ """A flag describing, that all should be simulated."""
+ return self._simulate
+
+ @simulate.setter
+ def simulate(self, value):
+ self._simulate = to_bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def no_pdns(self):
+ """Don't execute some PowerDNS actions or checks."""
+ return self._no_pdns
+
+ @no_pdns.setter
+ def no_pdns(self, value):
+ self._no_pdns = to_bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def pdns_api_key(self):
+ """The key used to authenticate against the PowerDNS API."""
+ return self._pdns_api_key
+
+ @pdns_api_key.setter
+ def pdns_api_key(self, value):
+ if value is None:
+ self._pdns_api_key = None
+ return
+ val = str(value)
+ if val == '':
+ self._pdns_api_key = None
+ else:
+ self._pdns_api_key = val
+
+ # -----------------------------------------------------------
+ @property
+ def pdns_api_use_https(self):
+ "Should HTTPS used for PDNS API calls."
+ return self._pdns_api_use_https
+
+ @pdns_api_use_https.setter
+ def pdns_api_use_https(self, value):
+ self._pdns_api_use_https = to_bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def pdns_api_timeout(self):
+ """The timeout in seconds for requesting the PowerDNS API."""
+ return self._pdns_api_timeout
+
+ @pdns_api_timeout.setter
+ def pdns_api_timeout(self, value):
+ if value is None:
+ self._pdns_api_timeout = self.default_pdns_api_timeout
+ return
+ val = int(value)
+ err_msg = _(
+ "Invalid timeout {t!r} for requesting the PowerDNS API, "
+ "must be 0 < SECONDS < {m}.")
+ if val <= 0 or val > self.max_pdns_api_timeout:
+ msg = err_msg.format(t=value, m=self.max_pdns_api_timeout)
+ raise ValueError(msg)
+ self._pdns_api_timeout = val
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_cat_os_id(self):
+ """The terraform ID of the VSphere tag category 'OS'."""
+ return self._vsphere_tag_cat_os_id
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_cat_os_name(self):
+ """The name of the VSphere OS tag category."""
+ return self._vsphere_tag_cat_os_name
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_cat_os_desc(self):
+ """The description of the VSphere OS tag category."""
+ return self._vsphere_tag_cat_os_desc
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_os_rhel_id(self):
+ """The terraform ID of the VSphere tag for OS RHEL."""
+ return self._vsphere_tag_os_rhel_id
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_os_rhel_name(self):
+ """The name of the VSphere tag for OS RHEL."""
+ return self._vsphere_tag_os_rhel_name
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere_tag_os_rhel_desc(self):
+ """The description of the VSphere tag for OS RHEL."""
+ return self._vsphere_tag_os_rhel_desc
+
+ # -----------------------------------------------------------
+ @property
+ def vm_root_password(self):
+ """The password of the VSphere user."""
+ return self._vm_root_password
+
+ @vm_root_password.setter
+ def vm_root_password(self, value):
+ if value is None:
+ self._vm_root_password = None
+ return
+ val = str(value)
+ if val == '':
+ self._vm_root_password = None
+ else:
+ self._vm_root_password = val
+
+ # -----------------------------------------------------------
+ @property
+ def disk_size(self):
+ """Default data disk size in GiB."""
+ return self._disk_size
+
+ @disk_size.setter
+ def disk_size(self, value):
+ if value is None:
+ msg = _("The default size of the data disk may not be None.")
+ raise TypeError(msg)
+ val = float(value)
+ if val < 1:
+ msg = _("The default size of the data disk must be greater or equal to one GB.")
+ raise ValueError(msg)
+ self._disk_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def disk_min_size(self):
+ """Minimal data disk size in GiB."""
+ return self._disk_min_size
+
+ @disk_min_size.setter
+ def disk_min_size(self, value):
+ if value is None:
+ msg = _("The minimal size of the data disk may not be None.")
+ raise TypeError(msg)
+ val = float(value)
+ if val < 1:
+ msg = _("The minimal size of the data disk must be greater or equal to one GB.")
+ raise ValueError(msg)
+ self._disk_min_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def disk_max_size(self):
+ """Maximal data disk size in GiB."""
+ return self._disk_max_size
+
+ @disk_max_size.setter
+ def disk_max_size(self, value):
+ if value is None:
+ msg = _("The maximal size of the data disk may not be None.")
+ raise TypeError(msg)
+ val = float(value)
+ if val < 1:
+ msg = _("The maximal size of the data disk must be greater or equal to one GB.")
+ raise ValueError(msg)
+ self._disk_max_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def root_min_size(self):
+ """Minimal root disk size in GiB."""
+ return self._root_min_size
+
+ @root_min_size.setter
+ def root_min_size(self, value):
+ if value is None:
+ msg = _("The minimal size of the root disk may not be None.")
+ raise TypeError(msg)
+ val = float(value)
+ if val < 1:
+ msg = _("The minimal size of the root disk must be greater or equal to one GB.")
+ raise ValueError(msg)
+ self._root_min_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def root_max_size(self):
+ """Maximal root disk size in GiB."""
+ return self._root_max_size
+
+ @root_max_size.setter
+ def root_max_size(self, value):
+ if value is None:
+ msg = _("The maximal size of the root disk may not be None.")
+ raise TypeError(msg)
+ val = float(value)
+ if val < 1:
+ msg = _("The maximal size of the root disk must be greater or equal to one GB.")
+ raise ValueError(msg)
+ self._root_max_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def rhsm_user(self):
+ """The user used for subscribing the VM at RedHat."""
+ return self._rhsm_user
+
+ @rhsm_user.setter
+ def rhsm_user(self, value):
+ if value is None:
+ self._rhsm_user = self.default_rhsm_user
+ return
+ val = str(value).strip()
+ if val == '':
+ self._rhsm_user = self.default_rhsm_user
+ else:
+ self._rhsm_user = val
+
+ # -----------------------------------------------------------
+ @property
+ def rhsm_password(self):
+ """The password of the user used for subscribing the VM at RedHat."""
+ return self._rhsm_password
+
+ @rhsm_password.setter
+ def rhsm_password(self, value):
+ if value is None:
+ msg = _("The password of the user used for subscribing at RedHat may not be None.")
+ raise CrTfConfigError(msg)
+ val = str(value).strip()
+ if val == '':
+ msg = _("The password of the user used for subscribing at RedHat may not be empty.")
+ raise CrTfConfigError(msg)
+ self._rhsm_password = val
+
+ # -------------------------------------------------------------------------
+ def init_vsphere_defaults(self):
+
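+        # Pre-populate self.vsphere with a VsphereConfig object for every
+        # entry of default_vsphere_defs ('live' and 'test'). Their settings
+        # may later be overridden by matching [vsphere:NAME] configuration
+        # sections, see eval_config_existing_vsphere().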
+ for vname in self.default_vsphere_defs.keys():
+
+ vs_data = self.default_vsphere_defs[vname]
+
+ params = {
+ 'appname': self.appname,
+ 'verbose': self.verbose,
+ 'base_dir': self.base_dir,
+ 'name': vname,
+ 'host': vs_data['host'],
+ 'port': vs_data['port'],
+ 'dc': vs_data['dc'],
+ 'cluster': vs_data['cluster'],
+ }
+
+ if self.verbose > 2:
+ msg = _("Creating a {}-object with parameters:").format('VsphereConfig')
+ msg += '\n' + pp(params)
+ LOG.debug(msg)
+ vsphere = VsphereConfig(**params)
+ if self.verbose > 2:
+ LOG.debug(_("Created object:") + '\n' + pp(vsphere.as_dict()))
+
+ self.vsphere[vname] = vsphere
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True, show_secrets=False):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(CrTfConfiguration, self).as_dict(short=short)
+
+ res['simulate'] = self.simulate
+ res['no_pdns'] = self.no_pdns
+ res['pdns_api_use_https'] = self.pdns_api_use_https
+ res['pdns_api_timeout'] = self.pdns_api_timeout
+ res['vm_root_password'] = None
+ res['pdns_api_key'] = None
+ res['disk_size'] = self.disk_size
+ res['disk_min_size'] = self.disk_min_size
+ res['disk_max_size'] = self.disk_max_size
+ res['root_min_size'] = self.root_min_size
+ res['root_max_size'] = self.root_max_size
+ res['rhsm_user'] = self.rhsm_user
+ res['vsphere_tag_cat_os_id'] = self.vsphere_tag_cat_os_id
+ res['vsphere_tag_cat_os_name'] = self.vsphere_tag_cat_os_name
+ res['vsphere_tag_cat_os_desc'] = self.vsphere_tag_cat_os_desc
+ res['vsphere_tag_os_rhel_id'] = self.vsphere_tag_os_rhel_id
+ res['vsphere_tag_os_rhel_name'] = self.vsphere_tag_os_rhel_name
+ res['vsphere_tag_os_rhel_desc'] = self.vsphere_tag_os_rhel_desc
+
+ res['vsphere'] = {}
+ for vsphere_name in self.vsphere.keys():
+ res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(
+ short=short, show_secrets=show_secrets)
+
+ if self.pdns_api_key:
+ if show_secrets or self.verbose > 4:
+ res['pdns_api_key'] = self.pdns_api_key
+ else:
+ res['pdns_api_key'] = '*******'
+
+ if self.vm_root_password:
+ if show_secrets or self.verbose > 4:
+ res['vm_root_password'] = self.vm_root_password
+ else:
+ res['vm_root_password'] = '*******'
+
+ if self.rhsm_password:
+ if show_secrets or self.verbose > 4:
+ res['rhsm_password'] = self.rhsm_password
+ else:
+ res['rhsm_password'] = '*******'
+ else:
+ res['rhsm_password'] = None
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def eval_config_section(self, config, section_name):
+ """Evaluating of all found configuration options."""
+
+ if self.verbose > 2:
+ msg = _("Checking config section {!r}:").format(section_name)
+ LOG.debug(msg)
+
+ super(CrTfConfiguration, self).eval_config_section(config, section_name)
+
+ sn = section_name.lower()
+
+ if sn == 'vsphere' or sn.startswith('vsphere:'):
+ if sn.startswith('vsphere:'):
+ vsphere_name = sn.replace('vsphere:', '').strip()
+ if vsphere_name == '':
+ LOG.error(_("Empty VSPhere name found."))
+ else:
+ self.eval_config_vsphere(config, section_name, vsphere_name)
+ else:
+ self.eval_config_vsphere(config, section_name, '_default')
+ elif sn == 'powerdns' or sn == 'pdns':
+ self.eval_config_pdns(config, section_name)
+ elif sn == 'terraform':
+ self.eval_config_terraform(config, section_name)
+
+ # -------------------------------------------------------------------------
+ def eval_config_global(self, config, section_name):
+ """Evaluating section [global] of configuration.
+ May be overridden in descendant classes."""
+
+ super(CrTfConfiguration, self).eval_config_global(
+ config=config, section_name=section_name)
+
+ re_tz = re.compile(r'^\s*(?:tz|time[_-]?zone)\s*$', re.IGNORECASE)
+ re_puppetmaster = re.compile(r'^\s*puppet[_-]?master\s*$', re.IGNORECASE)
+ re_puppetca = re.compile(r'^\s*puppet[_-]?ca\s*$', re.IGNORECASE)
+ re_rhsm_user = re.compile(r'^\s*rhsm[_-]?user\s*$', re.IGNORECASE)
+ re_rhsm_password = re.compile(r'^\s*rhsm[_-]?password\s*$', re.IGNORECASE)
+
+ for (key, value) in config.items(section_name):
+ if key.lower() == 'simulate':
+ self.simulate = value
+ elif re_tz.search(key) and value.strip():
+ val = value.strip()
+ try:
+ tz = pytz.timezone(val) # noqa
+ except pytz.exceptions.UnknownTimeZoneError as e:
+ raise CrTfConfigError(self.msg_invalid_type.format(
+ f=self.config_file, s=section_name, v=value, n='time_zone', e=e))
+ self.tz_name = value.strip()
+ elif re_puppetmaster.search(key) and value.strip():
+ val = value.strip()
+ if not RE_FQDN.search(val):
+ raise CrTfConfigError(self.msg_invalid_type.format(
+ f=self.config_file, s=section_name, v=value, n='puppet_master',
+ e='Invalid Host FQDN for puppetmaster'))
+ self.puppetmaster = val.lower()
+ elif re_puppetca.search(key) and value.strip():
+ val = value.strip()
+ if not RE_FQDN.search(val):
+ raise CrTfConfigError(self.msg_invalid_type.format(
+ f=self.config_file, s=section_name, v=value, n='puppet_ca',
+ e='Invalid Host FQDN for puppetca'))
+ self.puppetca = val.lower()
+ elif re_rhsm_user.search(key) and value.strip():
+ self.rhsm_user = value.strip()
+ elif re_rhsm_password.search(key) and value.strip():
+ self.rhsm_password = value.strip()
+
+ # -------------------------------------------------------------------------
+ def eval_config_vsphere(self, config, section_name, vsphere_name):
+
+ if self.verbose > 2:
+ LOG.debug(_("Checking config section {s!r} ({n}) ...").format(
+ s=section_name, n=vsphere_name))
+
+ if vsphere_name in self.vsphere:
+ self.eval_config_existing_vsphere(config, section_name, vsphere_name)
+ else:
+ self.eval_config_new_vsphere(config, section_name, vsphere_name)
+
+ # -------------------------------------------------------------------------
+ def eval_config_new_vsphere(self, config, section_name, vsphere_name):
+
+ params = {
+ 'appname': self.appname,
+ 'verbose': self.verbose,
+ 'base_dir': self.base_dir,
+ 'name': vsphere_name,
+ }
+
+ for (key, value) in config.items(section_name):
+
+ if key.lower() == 'host' and value.strip():
+ params['host'] = value.strip()
+ elif key.lower() == 'port':
+ params['port'] = value
+ elif key.lower() == 'user' and value.strip():
+ params['user'] = value.strip()
+ elif key.lower() == 'password':
+ params['password'] = value
+ elif key.lower() == 'dc' and value.strip():
+ params['dc'] = value.strip()
+ elif key.lower() == 'cluster' and value.strip():
+ params['cluster'] = value.strip()
+ elif self.re_template.search(key) and value.strip():
+ params['template_name'] = value.strip()
+ elif self.re_excl_ds.search(key) and value.strip():
+ datastores = self.re_split_ds.split(value.strip())
+ params['excluded_ds'] = datastores
+ elif self.re_min_root_size.search(key) and value.strip():
+ params['min_root_size_gb'] = value
+ elif self.re_guest_id.search(key) and value.strip():
+ params['guest_id'] = value.strip()
+ else:
+ msg = _(
+ "Unknown configuration parameter {k!r} with value {v!r} for VSPhere {n!r} "
+ "found.").format(k=key, v=value, n=vsphere_name)
+ LOG.warning(msg)
+
+ if self.verbose > 2:
+ msg = _("Creating a {}-object with parameters:").format('VsphereConfig')
+ msg += '\n' + pp(params)
+ LOG.debug(msg)
+ vsphere = VsphereConfig(**params)
+ if self.verbose > 2:
+ LOG.debug(_("Created object:") + '\n' + pp(vsphere.as_dict()))
+
+ vsphere.is_valid(raise_on_error=True)
+
+ self.vsphere[vsphere_name] = vsphere
+
+ return
+
+ # -------------------------------------------------------------------------
+ def eval_config_existing_vsphere(self, config, section_name, vsphere_name):
+
+ vsphere = self.vsphere[vsphere_name]
+
+ for (key, value) in config.items(section_name):
+
+ if key.lower() == 'host' and value.strip():
+ vsphere.host = value.strip()
+ elif key.lower() == 'port':
+ vsphere.port = value
+ elif key.lower() == 'user' and value.strip():
+ vsphere.user = value.strip()
+ elif key.lower() == 'password':
+ vsphere.password = value
+ elif key.lower() == 'dc' and value.strip():
+ vsphere.dc = value.strip()
+ elif key.lower() == 'cluster' and value.strip():
+ vsphere.cluster = value.strip()
+ elif self.re_template.search(key) and value.strip():
+ vsphere.template_name = value.strip()
+ elif self.re_excl_ds.search(key) and value.strip():
+ datastores = self.re_split_ds.split(value.strip())
+                vsphere.excluded_ds = datastores
+ elif self.re_min_root_size.search(key) and value.strip():
+ vsphere.min_root_size_gb = value.strip()
+ elif self.re_guest_id.search(key) and value.strip():
+ vsphere.guest_id = value.strip()
+ else:
+ msg = _(
+ "Unknown configuration parameter {k!r} with value {v!r} for VSPhere {n!r} "
+ "found.").format(k=key, v=value, n=vsphere_name)
+ LOG.warning(msg)
+
+ if self.verbose > 2:
+ LOG.debug(_("Updated object:") + '\n' + pp(vsphere.as_dict()))
+
+ vsphere.is_valid(raise_on_error=True)
+
+ self.vsphere[vsphere_name] = vsphere
+
+ return
+
+ # -------------------------------------------------------------------------
+ def eval_config_pdns(self, config, section):
+
+ if self.verbose > 2:
+ LOG.debug(_("Checking config section {!r} ...").format(section))
+
+ re_master = re.compile(
+ r'^\s*(?:master(?:[-_\.]?server)?|api(?:[-_\.]?(?:host|server)))\s*$', re.IGNORECASE)
+ re_port = re.compile(r'^\s*(?:api[-_\.]?)?port\s*$', re.IGNORECASE)
+ re_key = re.compile(r'^\s*(?:api[-_\.]?)?key\s*$', re.IGNORECASE)
+ re_use_https = re.compile(r'^\s*(?:api[-_\.]?)?(?:use[-_\.]?)?https\s*$', re.IGNORECASE)
+ re_prefix = re.compile(r'^\s*(?:api[-_\.]?)?(?:path[-_\.]?)?prefix\s*$', re.IGNORECASE)
+ re_comment_account = re.compile(r'^\s*comment[-_\.]?account\s*$', re.IGNORECASE)
+
+ for (key, value) in config.items(section):
+ if re_master.search(key) and value.strip():
+ self.pdns_master_server = value.strip().lower()
+ elif re_port.search(key) and value.strip():
+ val = 0
+ try:
+ val = int(value.strip())
+ except ValueError as e:
+ raise CrTfConfigError(self.msg_invalid_type.format(
+ f=self.config_file, s=section, v=value, n=key, e=e))
+ if val < 0:
+ raise CrTfConfigError(self.msg_val_negative.format(
+ f=self.config_file, s=section, v=value, n=key))
+ self.pdns_api_port = val
+ elif re_key.search(key) and value.strip():
+ self.pdns_api_key = value.strip()
+ elif re_use_https.search(key):
+ self.pdns_api_use_https = value
+ elif re_prefix.search(key) and value.strip():
+ self.pdns_api_path_prefix = value.strip()
+ elif key.lower() == 'timeout' and value.strip():
+ self.pdns_api_timeout = value.strip()
+ elif re_comment_account.search(key) and value.strip():
+ self.pdns_comment_account = value.strip()
+
+ return
+
+ # -------------------------------------------------------------------------
+ def eval_config_terraform(self, config, section):
+
+ if self.verbose > 2:
+ LOG.debug(_("Checking config section {!r} ...").format(section))
+
+ re_root_pw = re.compile(r'^\s*root[_-]?passw(?:ord)?\s*$', re.IGNORECASE)
+
+ re_disk_size = re.compile(r'^\s*(?:data[_-]?)?disk[_-]?size\s*$', re.IGNORECASE)
+
+ re_disk_min_size = re.compile(
+ r'^\s*(?:data[_-]?)?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
+ re_disk_max_size = re.compile(
+ r'^\s*(?:data[_-]?)?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
+ re_root_disk_min_size = re.compile(
+ r'^\s*root[_-]?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
+ re_root_disk_max_size = re.compile(
+ r'^\s*root[_-]?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
+
+ re_backend_host = re.compile(r'^\s*backend[_-]?host\s*$', re.IGNORECASE)
+ re_backend_scheme = re.compile(r'^\s*backend[_-]?scheme\s*$', re.IGNORECASE)
+ re_backend_path_prefix = re.compile(r'^\s*backend[_-]?path[_-]?prefix\s*$', re.IGNORECASE)
+
+ # re_list_split
+ re_puppet_envs = re.compile(r'^\s*puppet[_-]?env(?:ironment)?s?\s*$', re.IGNORECASE)
+ re_puppet_env = re.compile(r'^([+-])?([a-z](?:[a-z0-9_]*[a-z0-9])?)$', re.IGNORECASE)
+
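+        # Puppet environments are given as a comma-, colon- or space-separated
+        # list. A leading '-' schedules an environment for removal from the
+        # list of allowed environments, a leading '+' (or no sign) adds it.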
+ for (key, value) in config.items(section):
+ if re_root_pw.search(key) and value.strip():
+ self.vm_root_password = value
+ elif re_disk_size.search(key):
+ self.disk_size = value
+ elif re_disk_min_size.search(key):
+ self.disk_min_size = value
+ elif re_disk_max_size.search(key):
+ self.disk_max_size = value
+ elif re_root_disk_min_size.search(key):
+ self.root_min_size = value
+ elif re_root_disk_max_size.search(key):
+ self.root_max_size = value
+ elif re_backend_host.search(key) and value.strip():
+ self.tf_backend_host = value.strip().lower()
+ elif re_backend_scheme.search(key) and value.strip():
+ self.tf_backend_scheme = value.strip().lower()
+ elif re_backend_path_prefix.search(key) and value.strip():
+ self.tf_backend_path_prefix = value.strip()
+ elif re_puppet_envs.search(key) and value.strip():
+ v = value.strip()
+ env_list = self.re_list_split.split(v)
+ for env in env_list:
+ match = re_puppet_env.match(env)
+ if not match:
+ msg = _("Invalid puppet environment {env!r} found in {k!r}.").format(
+ env=env, k=key)
+                        LOG.warning(msg)
+ continue
+ sign = match.group(1)
+ val = match.group(2).lower()
+ if sign == '-':
+ self.puppet_envs_delete.add(val)
+ else:
+ self.puppet_envs_add.add(val)
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@summary: module for some common used error classes
+"""
+from __future__ import absolute_import
+
+# Standard modules
+
+
+# Own modules
+from fb_tools.errors import FbHandlerError, ExpectedHandlerError
+
+from fb_tools.config import ConfigError
+
+from .xlate import XLATOR
+
+__version__ = '1.3.0'
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformObjectError(FbHandlerError):
+ """Exception class on errors evaluation VM definition for terraform."""
+
+ pass
+
+
+# =============================================================================
+class TerraformHandlerError(TerraformObjectError):
+ """Exception because of handler errors."""
+
+ pass
+
+
+# =============================================================================
+class TerraformVSphereError(TerraformObjectError):
+ """Exception because of VSphere configuration errors."""
+
+ pass
+
+
+# =============================================================================
+class TerraformVmError(TerraformObjectError):
+ """Exception class on errors evaluation VM definition for terraform."""
+
+ pass
+
+
+# =============================================================================
+class TerraformVmDefinitionError(TerraformVmError):
+ """Exception class on errors evaluation VM definition for terraform."""
+
+ pass
+
+
+# =============================================================================
+class TerraformVmTooManyDisksError(TerraformVmDefinitionError):
+ """Exception class for the case, that too many disks should be connected to a VM."""
+
+ # -------------------------------------------------------------------------
+ def __init__(self, given_disks, max_disks=60):
+ """Initiate this exception class."""
+ self.given_disks = int(given_disks)
+ self.max_disks = int(max_disks)
+
+ # -------------------------------------------------------------------------
+ def __str__(self):
+ """Typecast into a string."""
+ msg = _(
+ "There should be too many disks ({gd}) assigned to a VM. "
+ "There are max. {maxd} disks allowed to assign to a VM.").format(
+ gd=self.given_disks, maxd=self.max_disks)
+ return msg
+
+
+# =============================================================================
+class NetworkNotExistingError(ExpectedHandlerError):
+ """Special error class for the case, if the expected network is not existing."""
+
+ # -------------------------------------------------------------------------
+ def __init__(self, net_name):
+
+ self.net_name = net_name
+
+ # -------------------------------------------------------------------------
+ def __str__(self):
+
+ msg = _("The network {!r} is not existing.").format(self.net_name)
+ return msg
+
+
+# =============================================================================
+class CrTfConfigError(ConfigError):
+ """Base error class for all exceptions happened during
+ evaluation of the cofiguration."""
+
+ pass
+
+
+# =============================================================================
+class AbortExecution(ExpectedHandlerError):
+ """Indicating an abort of the execution."""
+
+ # -------------------------------------------------------------------------
+ def __init__(self, step=None):
+
+ if step:
+ self.step = step
+ else:
+ self.step = _('<some unknown step>')
+
+ # -------------------------------------------------------------------------
+ def __str__(self):
+
+ return _("Aborting after {!r}.").format(self.step)
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A handler module for underlying actions.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import os
+import logging
+import re
+import stat
+import copy
+
+from pathlib import Path
+
+from subprocess import PIPE
+
+from distutils.version import LooseVersion
+
+# Third party modules
+import pytz
+import six
+
+from fb_tools.common import pp, to_bool, to_str
+from fb_tools.errors import HandlerError, ExpectedHandlerError
+from fb_tools.handling_obj import HandlingObject, CalledProcessError
+from fb_tools.handler import BaseHandler
+
+# Own modules
+from .dns import CrTfHandlerDnsMixin
+from .files import CrTfHandlerFilesMixin
+from .first import CrTfHandlerFirstMixin
+from .read import CrTfHandlerReadMixin
+from .vmware import CrTfHandlerVmwMixin
+
+from .. import MIN_VERSION_TERRAFORM, MAX_VERSION_TERRAFORM
+from .. import MIN_VERSION_VSPHERE_PROVIDER
+
+from ..errors import AbortExecution
+
+# from ..tools import password_input
+
+from ..xlate import XLATOR
+
+__version__ = '3.10.0'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CreateTerraformHandler(
+ BaseHandler, CrTfHandlerFirstMixin, CrTfHandlerReadMixin, CrTfHandlerDnsMixin,
+ CrTfHandlerVmwMixin, CrTfHandlerFilesMixin):
+ """
+ A handler class for creating the terraform environment
+ """
+
+ re_default = re.compile(r'^\s*defaults?\s*$', re.IGNORECASE)
+ re_vm_key = re.compile(r'^\s*vms?\s*$', re.IGNORECASE)
+ re_group = re.compile(r'^\s*groups?\s*$', re.IGNORECASE)
+ re_group_name = re.compile(r'^\s*name\s*$', re.IGNORECASE)
+ re_doublequote = re.compile(r'"')
+
+ re_tf_version = re.compile(r'^\s*Terraform\s+v(\S+)', re.IGNORECASE)
+
+ std_file_permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+ std_secure_file_permissions = stat.S_IRUSR | stat.S_IWUSR
+
+ sshkey_basename = 'id_rsa_cr_vmw_tpl'
+
+ open_opts = {}
+ if six.PY3:
+ open_opts['encoding'] = 'utf-8'
+ open_opts['errors'] = 'surrogateescape'
+
+ max_groups_depth = 10
+
+ tz_name = 'Europe/Berlin'
+ tz = pytz.timezone(tz_name)
+
+ steps = (
+ 'init', 'vmw-init', 'read-yaml', 'pdns-zones', 'vmw-test', 'collect-folders',
+ 'vmw-clusters', 'vmw-datastores', 'vmw-ds-clusters', 'vmw-networks', 'vmw-templates',
+ 'validate-yaml', 'validate-storage', 'validate-iface', 'validate-dns',
+ 'perform-dns', 'project-dir', 'tf-files', 'ensure-vmw-folders',
+ )
+ step_desc = {
+ 'init': _('After initialization of all objects and handlers.'),
+        'vmw-init': _('After initialisation of vSphere handlers.'),
+ 'read-yaml': _('After reading the given YAML file.'),
+ 'pdns-zones': _('After retrieving all DNS zones from PowerDNS.'),
+        'vmw-test': _('After testing vSphere handlers.'),
+ 'collect-folders': _('After collecting all VMWare and local folders.'),
+ 'vmw-clusters': _('After collecting all VMWare clusters.'),
+ 'vmw-datastores': _('After collecting all VMWare datastores.'),
+ 'vmw-ds-clusters': _('After collecting all VMWare datastore clusters.'),
+ 'vmw-networks': _('After collecting all VMWare networks.'),
+ 'vmw-templates': _('After validating all given VMWare templates.'),
+ 'validate-yaml': _('After syntax validating of data from loaded YAML file.'),
+ 'validate-storage': _('After validating all given storage data.'),
+ 'validate-iface': _('After validating all given network interface data.'),
+ 'validate-dns': _('After validating all given DNS data.'),
+ 'perform-dns': _('After performing all necessary actions in DNS.'),
+ 'project-dir': _('After ensuring availability of the project directory.'),
+ 'tf-files': _('After creation of the Terraform project files.'),
+ 'ensure-vmw-folders': _('After ensuring availability of VM folders in VMWare vSphere.'),
+ }
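+
+    # The names in 'steps' are the values accepted by the 'stop_at_step'
+    # property below; each exec_*() step raises AbortExecution right after it
+    # has finished when it matches the configured stop step.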
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None,
+ config=None, simulate=False, force=False, ignore_existing_dns=False,
+ terminal_has_colors=False, initialized=False):
+
+ self.pdns = None
+ self.vsphere = {}
+ self.config = None
+
+ self.terraform_cmd = None
+
+ self.yaml_data = None
+
+ self.default_vm = None
+ self.group_default_vms = {}
+
+ self.ignore_existing_dns = bool(ignore_existing_dns)
+
+ self.vms = []
+ self.vsphere_templates = {}
+
+ self.vm_names = []
+ self.fqdns = {}
+ self.addresses = {}
+
+ self.vsphere_folders = []
+
+ self.vsphere_user = None
+ self.vsphere_password = None
+
+ self.used_networks = {}
+ self.used_dc_clusters = {}
+ self.used_datastores = {}
+ self.project_dir = None
+ self.project_name = None
+
+ self._terraform_root_dir = None
+
+ self.all_vms = {}
+ self.existing_vms = []
+
+ self.start_dir = Path(os.getcwd())
+
+ self.script_dir = None
+ self.script_dir_rel = None
+ self.keys_dir = None
+ self.keys_dir_rel = None
+ self.private_key = None
+ self.private_key_rel = None
+
+ self._stop_at_step = None
+
+ self.min_version_terraform = None
+ if MIN_VERSION_TERRAFORM:
+ self.min_version_terraform = LooseVersion(MIN_VERSION_TERRAFORM)
+
+ self.max_version_terraform = None
+ if MAX_VERSION_TERRAFORM:
+ self.max_version_terraform = LooseVersion(MAX_VERSION_TERRAFORM)
+
+ self.min_version_vsphere_provider = None
+ if MIN_VERSION_VSPHERE_PROVIDER:
+ self.min_version_vsphere_provider = LooseVersion(MIN_VERSION_VSPHERE_PROVIDER)
+
+ self.dns_mapping = {
+ 'forward': [],
+ 'reverse': [],
+ }
+ self.dns_mappings2create = {
+ 'forward': [],
+ 'reverse': [],
+ }
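+        # Both mappings hold tuples: 'forward' contains (fqdn, ip_address)
+        # pairs, 'reverse' contains (ip_address, fqdn) pairs, as evaluated and
+        # filled by the DNS validation and creation steps.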
+
+ self.updated_zones = []
+
+ self.eval_errors = 0
+
+ super(CreateTerraformHandler, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
+ initialized=False,
+ )
+
+ if config:
+ self.config = config
+ if self.verbose >= 1:
+ msg = _("Given configuration:") + '\n' + pp(self.config.as_dict())
+ LOG.debug(msg)
+
+ self.script_dir = self.base_dir.joinpath('postinstall-scripts')
+ LOG.debug(_("Directory for postinstall scripts: {!r}.").format(str(self.script_dir)))
+ if not self.script_dir.exists():
+ msg = _("Directory for postinstall scripts {!r} does not exists.").format(
+ str(self.script_dir))
+ raise ExpectedHandlerError(msg)
+ if not self.script_dir.is_dir():
+ msg = _("Path {!r} for postinstall scripts exists, but is not a directory.").format(
+ str(self.script_dir))
+ raise ExpectedHandlerError(msg)
+
+ self.keys_dir = self.base_dir.joinpath('keys')
+ LOG.debug(_("Directory for SSH deploy keys: {!r}.").format(str(self.keys_dir)))
+ if not self.keys_dir.exists():
+ msg = _("Directory for SSH deploy keys {!r} does not exists.").format(
+ str(self.keys_dir))
+ raise ExpectedHandlerError(msg)
+ if not self.keys_dir.is_dir():
+ msg = _("Path {!r} for SSH deploy keys exists, but is not a directory.").format(
+ str(self.keys_dir))
+ raise ExpectedHandlerError(msg)
+
+ self.private_key = self.keys_dir / self.sshkey_basename
+ LOG.debug(_("Filename of the private SSH deploy key: {!r}").format(str(self.private_key)))
+ if not self.private_key.is_file():
+ msg = _(
+ "Private SSH deploy key file {!r} does not exists or is not a "
+ "regular file.").format(str(self.private_key))
+ raise ExpectedHandlerError(msg)
+
+ if initialized:
+ self.initialized = True
+
+ # -----------------------------------------------------------
+ @HandlingObject.simulate.setter
+ def simulate(self, value):
+ self._simulate = to_bool(value)
+
+ if self.initialized:
+ LOG.debug(_("Setting simulate of all subsequent objects to {!r} ...").format(
+ self.simulate))
+
+ if self.pdns:
+ self.pdns.simulate = self.simulate
+
+ for vsphere_name in self.vsphere.keys():
+ if self.vsphere[vsphere_name]:
+ self.vsphere[vsphere_name].simulate = self.simulate
+
+ # -----------------------------------------------------------
+ @property
+ def stop_at_step(self):
+ """Step, at which the execution should be interrupted."""
+ return self._stop_at_step
+
+ @stop_at_step.setter
+ def stop_at_step(self, value):
+ if value is None:
+ self._stop_at_step = None
+ return
+ v = str(value).strip().lower().replace('_', '-')
+ if v == '':
+ self._stop_at_step = None
+ return
+ if v not in self.steps:
+ msg = _("Invalid step name {!r} for interrupting execution.").format(value)
+ raise ValueError(msg)
+ self._stop_at_step = v
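+        # Example (illustrative): assigning 'Read_YAML' is normalized to
+        # 'read-yaml'; names not listed in 'steps' raise a ValueError.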
+
+ # -----------------------------------------------------------
+ @property
+ def terraform_root_dir(self):
+ """Root directory of all terraform directories."""
+ if self.is_venv:
+ return self.base_dir.parent
+ return self._terraform_root_dir
+
+ # -----------------------------------------------------------
+ @property
+ def full_project_name(self):
+ """Complete project name with parent paths."""
+ if not self.project_name:
+ return None
+ if not self.project_dir:
+ return None
+ if not self.terraform_root_dir:
+ return self.project_name
+ return os.path.relpath(str(self.project_dir), self.terraform_root_dir)
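+
+    # Example (illustrative, hypothetical paths): with a terraform root dir of
+    # '/opt/terraform' and a project dir of '/opt/terraform/customer/webshop',
+    # full_project_name evaluates to 'customer/webshop'.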
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(CreateTerraformHandler, self).as_dict(short=short)
+ res['std_file_permissions'] = "{:04o}".format(self.std_file_permissions)
+ res['std_secure_file_permissions'] = "{:04o}".format(self.std_secure_file_permissions)
+ res['open_opts'] = self.open_opts
+ res['stop_at_step'] = self.stop_at_step
+ res['steps'] = copy.copy(self.steps)
+ res['tz_name'] = self.tz_name
+ res['terraform_root_dir'] = self.terraform_root_dir
+ res['full_project_name'] = self.full_project_name
+ res['vsphere'] = {}
+ for vsphere_name in self.vsphere.keys():
+ res['vsphere'][vsphere_name] = self.vsphere[vsphere_name].as_dict(short=short)
+
+ return res
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def set_tz(cls, tz_name):
+
+ if not tz_name.strip():
+ raise ValueError(_("Invalid time zone name {!r}.").format(tz_name))
+ tz_name = tz_name.strip()
+ LOG.debug(_("Setting time zone to {!r}.").format(tz_name))
+ cls.tz = pytz.timezone(tz_name)
+ cls.tz_name = tz_name
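+        # Example (illustrative): CreateTerraformHandler.set_tz('Europe/Vienna')
+        # switches the class-wide time zone used by all handler instances.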
+
+ # -------------------------------------------------------------------------
+ def __del__(self):
+ """Destructor."""
+
+ LOG.debug(_("Self destruction."))
+
+ if self.pdns:
+ self.pdns = None
+
+ if self.vsphere:
+ self.vsphere = None
+
+ # -------------------------------------------------------------------------
+ def __call__(self, yaml_file):
+ """Executing the underlying action."""
+
+ if not self.initialized:
+ raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+ try:
+
+ if self.simulate:
+ print()
+ msg_a = _("Simulation mode")
+ msg_b = (
+ "* " + _("Necessary DNS records are not created."),
+ "* " + _("Terraform files are not created.")
+ )
+ ll = 4
+ if len(msg_a) > ll:
+ ll = len(msg_a)
+ for msg in msg_b:
+ if len(msg) > ll:
+ ll = len(msg)
+
+ print(self.colored('#' * (ll + 4), 'AQUA'))
+ line = self.colored('#', 'AQUA') + ' '
+ line += self.colored(msg_a.center(ll), 'YELLOW')
+ line += ' ' + self.colored('#', 'AQUA')
+ print(line)
+ for msg in msg_b:
+ line = '# ' + msg.ljust(ll) + ' #'
+ print(self.colored(line, 'AQUA'))
+ print(self.colored('#' * (ll + 4), 'AQUA'))
+ print()
+
+ self.exec_pdns_zones()
+
+ print()
+ LOG.info(_("Collecting first information from vSPhere."))
+ self.test_vsphere_handlers()
+ self.exec_collect_folders(yaml_file)
+ self.assign_default_vmw_values()
+
+ print()
+ LOG.info(_("Retrieving information from vSphere."))
+
+ self.exec_vmw_clusters()
+ self.exec_vmw_datastores()
+ self.exec_vmw_ds_clusters()
+ self.exec_vmw_networks()
+ self.exec_vmw_templates()
+
+ self.exec_validate_yaml()
+ self.exec_validate_storage()
+ self.exec_validate_iface()
+ self.exec_validate_dns()
+
+ if self.verbose > 2:
+
+ vm_list = []
+ for vm in self.vms:
+ vm_list.append(vm.as_dict())
+ LOG.debug(_("Validated VMs:") + "\n" + pp(vm_list))
+
+ if self.existing_vms:
+ msg = ngettext(
+ "There is one existing virtual machine.",
+ "There are {c} existing virtual machines.",
+ len(self.existing_vms)).format(c=len(self.existing_vms))
+ LOG.warn(msg)
+ if self.verbose > 2:
+ msg = ngettext(
+ "Existing virtual machine:", "Existing virtual machines:",
+ len(self.existing_vms))
+ LOG.debug(msg + '\n' + pp(self.existing_vms))
+ else:
+ LOG.info(_("No existing virtual machines found in YAML file."))
+
+ self.exec_perform_dns()
+ self.exec_project_dir()
+
+ self.exec_tf_files()
+ self.exec_vsphere_folders()
+
+ LOG.info(_("Finished all steps."))
+
+ except AbortExecution as e:
+ LOG.warn(str(e))
+ return
+
+ self.exec_terraform()
+ if self.simulate:
+ print()
+            print(self.colored(
+                _('And as I said before - it was only a simulation!'), 'AQUA'))
+
+ print()
+
+    # -------------------------------------------------------------------------
+ def exec_terraform(self):
+
+ tf_timeout = 30
+
+ print()
+ LOG.info(_("Executing {!r} ...").format('terraform init'))
+ cmd = [str(self.terraform_cmd), 'init']
+ try:
+ result = self.run(
+ cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+ except CalledProcessError as e:
+ if e.stdout:
+ print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+ if e.stderr:
+ print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
+ raise ExpectedHandlerError(str(e))
+ LOG.debug(_("Completed process:") + "\n" + str(result))
+
+ if self.existing_vms:
+ print()
+ LOG.info(_("Importing existing virtual machines ..."))
+
+ for vm in self.existing_vms:
+
+ vs_name = vm.vsphere
+ print()
+ LOG.info(_("Importing VM {!r}.").format(vm.name))
+ vm_obj = 'vsphere_virtual_machine.{}'.format(vm.tf_name)
+ path = '/{dc}/{f}/{p}/{n}'.format(
+ dc=self.vsphere[vs_name].dc, f=self.vsphere[vs_name].dc_obj.vm_folder,
+ p=vm.path, n=vm.name)
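+                # Illustrative example (hypothetical names): with dc='DC01',
+                # vm_folder='vm', path='prod/web' and name='web01' this yields
+                # the import address '/DC01/vm/prod/web/web01'.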
+ cmd = [str(self.terraform_cmd), 'import', vm_obj, path]
+ try:
+ result = self.run(
+ cmd, may_simulate=True, timeout=tf_timeout,
+ stdout=PIPE, stderr=PIPE, check=True)
+ except CalledProcessError as e:
+ if e.stdout:
+ print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+ if e.stderr:
+ msg = self.colored("Error message", ('BOLD', 'RED')) + ':\n'
+ msg += to_str(e.stderr)
+ print(msg)
+ LOG.warn(_("Error on importing VM {!r}:").format(vm.name) + ' ' + str(e))
+
+ LOG.debug(_("Completed process:") + "\n" + str(result))
+
+# print()
+# LOG.info(_("Executing {!r} ...").format('terraform plan'))
+# cmd = [str(self.terraform_cmd), 'plan']
+# try:
+# result = self.run(
+# cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+# except CalledProcessError as e:
+# if e.stdout:
+# print(self.colored("Output", 'AQUA') + ':\n' + to_str(e.stdout))
+# if e.stderr:
+# print(self.colored("Error message", ('BOLD', 'RED')) + ':\n' + to_str(e.stderr))
+# raise ExpectedHandlerError(str(e))
+# LOG.debug(_("Completed process:") + "\n" + str(result))
+
+ goto = Path(os.path.relpath(self.project_dir, self.start_dir))
+
+ print()
+ print()
+ print(self.colored(_("Congratulations!"), 'GREEN'))
+ print()
+ print(_("Now you are ready to deploy the following virtual machines:"))
+ for vm in sorted(self.vms, key=lambda x: x.tf_name):
+ print(" * {}".format(vm.fqdn))
+ print()
+ print(_("To start the deployment process change to directory {}").format(
+ self.colored(str(goto), 'GREEN')))
+ print()
+ print(_("and enter: {}").format(self.colored('terraform apply', 'GREEN')))
+ print()
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A mixin module for the handler module for DNS-related methods.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import ipaddress
+import logging
+import socket
+
+# Third party modules
+from fb_tools.common import RE_DOT_AT_END
+from fb_tools.errors import ExpectedHandlerError
+
+# Own modules
+
+from ..errors import AbortExecution
+
+from ..xlate import XLATOR
+
+__version__ = '0.1.1'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerDnsMixin():
+ """A mixin module for the handler module for dns related methods."""
+
+    # -------------------------------------------------------------------------
+ def exec_pdns_zones(self):
+
+ if self.config.no_pdns:
+ return
+
+ if self.stop_at_step == 'pdns-zones':
+ self.incr_verbosity()
+
+ print()
+ LOG.info(_("Retrieving informations from PowerDNS ..."))
+
+ self.pdns.get_api_zones()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring PowerDNS zones.",
+ "Found {n} errors in exploring PowerDNS zones.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('pdns-zones'))
+ if self.stop_at_step == 'pdns-zones':
+ raise AbortExecution('pdns-zones')
+
+    # -------------------------------------------------------------------------
+ def exec_validate_dns(self):
+
+ if self.stop_at_step == 'validate-dns':
+ self.incr_verbosity()
+
+ self.validate_dns_mappings()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating DNS mappings.",
+ "Found {n} errors in validating DNS mappings.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-dns'))
+ if self.stop_at_step == 'validate-dns':
+ raise AbortExecution('validate-dns')
+
+    # -------------------------------------------------------------------------
+ def exec_perform_dns(self):
+
+ if self.stop_at_step == 'perform-dns':
+ self.incr_verbosity()
+
+ self.perform_dns()
+
+ LOG.info(_("Finished step {!r}.").format('perform-dns'))
+ if self.stop_at_step == 'perform-dns':
+ raise AbortExecution('perform-dns')
+
+ # --------------------------------------------------------------------------
+ def perform_dns(self):
+
+ if self.config.no_pdns:
+ LOG.debug(_("Power DNS actions are not executed."))
+ return
+
+ print()
+ LOG.info(_("Performing DNS actions ..."))
+ print()
+
+ # TODO: Check for simulate and mappings to create
+
+ errors = 0
+
+ for (fqdn, address) in self.dns_mappings2create['forward']:
+ if not self._perform_dns_forward(fqdn, address):
+ errors += 1
+
+ for (address, fqdn) in self.dns_mappings2create['reverse']:
+ if not self._perform_dns_reverse(address, fqdn):
+ errors += 1
+
+ if errors:
+ msg = ngettext(
+ "There was one error in creating DNS mappings.",
+ "There were {n} errors in creating DNS mappings.", errors).format(n=errors)
+ raise ExpectedHandlerError(msg)
+ else:
+ if self.verbose > 1:
+ LOG.debug(_("No errors in creating DNS mappings."))
+
+ print()
+
+ for zone_name in self.updated_zones:
+ self._increase_zone_serial(zone_name)
+
+ # --------------------------------------------------------------------------
+ def _increase_zone_serial(self, zone_name):
+
+ LOG.info(_("Increasing serial of zone {!r}.").format(zone_name))
+
+ zone = self.pdns.zones[zone_name]
+ zone.increase_serial()
+ zone.notify()
+
+ # --------------------------------------------------------------------------
+ def _perform_dns_forward(self, fqdn, address):
+
+ record_type = 'A'
+ addr_obj = ipaddress.ip_address(address)
+ if addr_obj.version == 6:
+ record_type = 'AAAA'
+
+ canon_fqdn = self.pdns.canon_name(fqdn)
+
+ zone_name = self.pdns.get_zone_for_item(canon_fqdn, is_fqdn=True)
+ if zone_name:
+ if self.verbose > 1:
+ LOG.debug(_("Got zone {z!r} for FQDN {f!r}.").format(
+ z=zone_name, f=canon_fqdn))
+ else:
+ LOG.error(_("Did not found zone to insert {t}-record for {f!r}.").format(
+ t=record_type, f=fqdn))
+ return False
+
+ zone = self.pdns.zones[zone_name]
+ if addr_obj.is_private:
+ zone.add_address_record(
+ fqdn, address, set_ptr=False, comment='local',
+ account=self.config.pdns_comment_account, append_comments=True)
+ else:
+ zone.add_address_record(fqdn, address, set_ptr=False)
+ if zone_name not in self.updated_zones:
+ self.updated_zones.append(zone_name)
+ return True
+
+ # --------------------------------------------------------------------------
+ def _perform_dns_reverse(self, address, fqdn):
+
+ LOG.debug(_("Trying to create PTR-record {a!r} => {f!r}.").format(
+ f=fqdn, a=str(address)))
+
+ pointer = self.pdns.canon_name(address.reverse_pointer)
+ if self.verbose > 1:
+ LOG.debug(_("PTR of {a!r}: {p!r}.").format(a=str(address), p=pointer))
+
+ zone_name = self.pdns.get_zone_for_item(pointer, is_fqdn=True)
+ if zone_name:
+ if self.verbose > 1:
+ LOG.debug(_("Got reverse zone {z!r} for address {a!r}.").format(
+ z=zone_name, a=str(address)))
+ else:
+ LOG.warn(_("Did not found zone to insert PTR-record {p!r} ({a}).").format(
+ p=pointer, a=str(address)))
+ return True
+
+ zone = self.pdns.zones[zone_name]
+ zone.add_ptr_record(pointer, fqdn)
+ if zone_name not in self.updated_zones:
+ self.updated_zones.append(zone_name)
+ return True
+
+    # -------------------------------------------------------------------------
+ def validate_dns_mappings(self):
+
+ LOG.info(_("Validating DNS mappings ..."))
+ self._validate_forward_dns_mappings()
+ self._validate_reverse_dns_mappings()
+
+ lines = []
+ if self.dns_mappings2create['forward']:
+ for pair in self.dns_mappings2create['forward']:
+ line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+ lines.append(line)
+ else:
+ lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
+ LOG.info(_("Forward DNS entries to create:") + "\n" + '\n'.join(lines))
+
+ lines = []
+ if self.dns_mappings2create['reverse']:
+ for pair in self.dns_mappings2create['reverse']:
+ line = ' * {r} ({a!r}) => {n!r}'.format(
+ r=pair[0].reverse_pointer, n=pair[1], a=str(pair[0]))
+ lines.append(line)
+ else:
+ lines.append(self.colored('>>> ' + _('None') + ' <<<', 'AQUA'))
+ LOG.info(_("Reverse DNS entries to create:") + "\n" + '\n'.join(lines))
+
+    # -------------------------------------------------------------------------
+ def _validate_forward_dns_mappings(self):
+
+ if not self.dns_mapping['forward']:
+ return
+
+ LOG.debug(_("Validating forward DNS mappings ..."))
+
+ for (fqdn, address) in self.dns_mapping['forward']:
+
+ if self.verbose > 1:
+ LOG.debug(_("Validating {f!r} => {a!r}.").format(f=fqdn, a=str(address)))
+
+ results_v4 = []
+ results_v6 = []
+
+ try:
+ addr_infos = socket.getaddrinfo(fqdn, 80)
+ except socket.gaierror:
+ addr_infos = []
+
+ for addr_info in addr_infos:
+ if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
+ continue
+ addr = ipaddress.ip_address(addr_info[4][0])
+ if addr.version == 4:
+ if addr not in results_v4:
+ results_v4.append(addr)
+ else:
+ if addr not in results_v6:
+ results_v6.append(addr)
+ if self.verbose > 2:
+ if results_v4 or results_v6:
+ lines = []
+ for addr in results_v4 + results_v6:
+ lines.append(' * {}'.format(str(addr)))
+ out = '\n'.join(lines)
+ LOG.debug(_("Found existing addresses for {f!r}:").format(f=fqdn) + '\n' + out)
+ else:
+ LOG.debug(_("Did not found existing addresses for {!r}.").format(fqdn))
+
+ if address.version == 4:
+ if not results_v4:
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ continue
+ if address in results_v4:
+ LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+ f=fqdn, a=str(address)))
+ continue
+ else:
+ if not results_v6:
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ continue
+ if address in results_v6:
+ LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+ f=fqdn, a=str(address)))
+ continue
+
+ alist = '\n'.join(map(lambda x: ' * {}'.format(str(x)), results_v4 + results_v6))
+ msg = (_(
+ "FQDN {f!r} has already existing addresses, "
+ "but none of them are {a!r}:").format(f=fqdn, a=str(address)) + "\n" + alist)
+ if self.ignore_existing_dns:
+ LOG.warn(msg)
+ self.dns_mappings2create['forward'].append((fqdn, address))
+ else:
+ LOG.error(msg)
+ self.eval_errors += 1
+
+    # -------------------------------------------------------------------------
+ def _validate_reverse_dns_mappings(self):
+
+ if not self.dns_mapping['reverse']:
+ return
+
+ LOG.debug(_("Validating reverse DNS mappings ..."))
+
+ for (address, fqdn) in self.dns_mapping['reverse']:
+
+ if self.verbose > 1:
+ LOG.debug(_("Validating {a!r} => {f!r}.").format(f=fqdn, a=str(address)))
+
+ try:
+ info = socket.gethostbyaddr(str(address))
+ except socket.herror:
+ info = []
+ if self.verbose > 2:
+ LOG.debug(_("Got reverse info:") + "\n" + str(info))
+ ptr = None
+ if info:
+ ptr = info[0]
+
+ if not ptr:
+ if self.verbose > 1:
+ LOG.debug(_("Did not found reverse pointer for {!r}.").format(str(address)))
+ self.dns_mappings2create['reverse'].append((address, fqdn))
+ continue
+
+ ptr = RE_DOT_AT_END.sub('', ptr).lower()
+ fqdn_canon = RE_DOT_AT_END.sub('', fqdn).lower()
+
+ if self.verbose > 1:
+ LOG.debug(_("Found reverse pointer {a!r} => {f!r}.").format(f=ptr, a=str(address)))
+ if fqdn_canon == ptr:
+ if self.verbose > 1:
+ LOG.debug(_("Reverse pointer for {!r} was already existing.").format(
+ str(address)))
+ continue
+
+ LOG.error(_("Address {a!r} has already an existing reverse pointer to {p!r}.").format(
+ a=str(address), p=ptr))
+ self.eval_errors += 1
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A mixin module for the handler with methods for creating Terraform project files.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+import os
+import shutil
+import stat
+import textwrap
+
+from pathlib import Path
+
+# Third party modules
+
+from fb_tools.common import pp
+from fb_tools.errors import ExpectedHandlerError
+
+# Own modules
+from ..errors import AbortExecution
+
+
+from ..xlate import XLATOR
+
+__version__ = '0.5.3'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerFilesMixin():
+ """A mixin module for the handler module for reading and evaluation."""
+
+    # -------------------------------------------------------------------------
+ def exec_project_dir(self):
+
+ if self.stop_at_step == 'project-dir':
+ self.incr_verbosity()
+
+ self.ensure_project_dir()
+ self.clean_project_dir()
+
+ LOG.info(_("Finished step {!r}.").format('project-dir'))
+ if self.stop_at_step == 'project-dir':
+ raise AbortExecution('project-dir')
+
+    # -------------------------------------------------------------------------
+ def exec_tf_files(self):
+
+ if self.stop_at_step == 'tf-files':
+ self.incr_verbosity()
+
+ self.create_terraform_files()
+
+ LOG.info(_("Finished step {!r}.").format('tf-files'))
+ if self.stop_at_step == 'tf-files':
+ raise AbortExecution('tf-files')
+
+    # -------------------------------------------------------------------------
+ def get_tf_name_network(self, net_name, *args):
+
+ default = None
+ has_default = False
+ if len(args):
+ if len(args) > 1:
+ msg = ngettext(
+ "Method {c}.{m} expected at most one argument, got {n}.",
+ "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+ c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
+ raise TypeError(msg)
+ default = args[0]
+ has_default = True
+
+ if net_name in self.vsphere.network_mapping:
+ return self.vsphere.network_mapping[net_name]
+ if has_default:
+ return default
+ raise KeyError(_("Did not found network {!r}.").format(net_name))
+
+ # --------------------------------------------------------------------------
+ def get_tf_name_ds_cluster(self, dsc_name, *args):
+
+ default = None
+ has_default = False
+ if len(args):
+ if len(args) > 1:
+ msg = ngettext(
+ "Method {c}.{m} expected at most one argument, got {n}.",
+ "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+ c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
+ raise TypeError(msg)
+ default = args[0]
+ has_default = True
+
+ if dsc_name in self.vsphere.ds_cluster_mapping:
+ return self.vsphere.ds_cluster_mapping[dsc_name]
+ if has_default:
+ return default
+ raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
+
+ # --------------------------------------------------------------------------
+ def get_tf_name_datastore(self, ds_name, *args):
+
+ default = None
+ has_default = False
+ if len(args):
+ if len(args) > 1:
+ msg = ngettext(
+ "Method {c}.{m} expected at most one argument, got {n}.",
+ "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+ c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
+ raise TypeError(msg)
+ default = args[0]
+ has_default = True
+
+ if ds_name in self.vsphere.ds_mapping:
+ return self.vsphere.ds_mapping[ds_name]
+ if has_default:
+ return default
+ raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
+
+ # --------------------------------------------------------------------------
+ def ensure_project_dir(self):
+
+ print()
+ LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
+
+ if self.project_dir.exists():
+ if self.project_dir.is_dir():
+ LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
+ else:
+ msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
+ raise ExpectedHandlerError(msg)
+ else:
+ LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
+ if self.simulate:
+ LOG.debug(_("Simulation mode - directory will not be created."))
+ else:
+ try:
+ os.makedirs(str(self.project_dir), mode=0o755)
+ except PermissionError as e:
+ msg = _("Could not create directory {d!r}: {e}").format(
+ d=str(self.project_dir), e=e)
+ raise ExpectedHandlerError(msg)
+
+ if not self.project_dir.exists():
+ if self.simulate:
+ return
+ else:
+ msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
+ raise ExpectedHandlerError(msg)
+
+ if not os.access(str(self.project_dir), os.W_OK):
+ msg = _("No write access to directory {!r}.").format(str(self.project_dir))
+ raise ExpectedHandlerError(msg)
+
+ LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
+ os.chdir(str(self.project_dir))
+
+ self.script_dir_rel = Path(os.path.relpath(
+ str(self.script_dir), str(self.project_dir)))
+ LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
+
+ filemode = stat.S_IMODE(self.private_key.stat().st_mode)
+ LOG.debug(_("Permissions of {k!r} are {m:04o}.").format(
+ k=str(self.private_key), m=filemode))
+ if filemode not in [0o400, 0o600]:
+ LOG.info(_("Setting permissions of {k!r} from {o:04o} to {m:04o}.").format(
+ k=str(self.private_key), o=filemode, m=0o600))
+ self.private_key.chmod(0o600)
+
+ self.keys_dir_rel = Path(os.path.relpath(
+ str(self.keys_dir), str(self.project_dir)))
+ LOG.debug(_("Directory for SSH deploy keys relative to project dir: {!r}.").format(
+ str(self.keys_dir_rel)))
+
+ self.private_key_rel = self.keys_dir_rel / self.sshkey_basename
+ LOG.debug(_(
+ "Filename of the private SSH deploy key relative to project "
+ "dir: {!r}").format(str(self.private_key_rel)))
+
+ if self.verbose > 1:
+ LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
+ str(self.project_dir)))
+
+ tf_path = self.project_dir / '.terraform'
+ if tf_path.exists() and not tf_path.is_dir():
+ msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
+ d=str(self.project_dir), w='.terraform')
+ raise ExpectedHandlerError(msg)
+
+ state_path = self.project_dir / 'terraform.tfstate'
+ if state_path.exists() and not state_path.is_file():
+ msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
+ d=str(self.project_dir), w='terraform.tfstate')
+ raise ExpectedHandlerError(msg)
+
+ if tf_path.is_dir() and state_path.is_file():
+ msg = _(
+ "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
+ "Is this an old terraform project?").format(
+ d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
+ raise ExpectedHandlerError(msg)
+
+ # --------------------------------------------------------------------------
+ def clean_project_dir(self):
+
+ print()
+ LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
+
+ files = []
+ for path in self.project_dir.glob('*'):
+ files.append(path)
+ for path in self.project_dir.glob('.terraform'):
+ files.append(path)
+
+ if not files:
+ LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
+ return
+ for pfile in files:
+ if pfile.exists():
+ if pfile.is_dir():
+ LOG.debug(_("Removing recursive directory {!r} ...").format(str(pfile)))
+ if not self.simulate:
+ shutil.rmtree(str(pfile))
+ else:
+ LOG.debug(_("Removing {!r} ...").format(str(pfile)))
+ if not self.simulate:
+ pfile.unlink()
+
+ # --------------------------------------------------------------------------
+ def create_terraform_files(self):
+
+ print()
+ print()
+ msg = _("Creating all necessary files for terraform.")
+ ll = 6
+ if len(msg) > ll:
+ ll = len(msg)
+ print(self.colored('#' * (ll + 6), 'AQUA'))
+ line = self.colored('#', 'AQUA') + ' '
+ line += self.colored(msg.center(ll), 'YELLOW')
+ line += ' ' + self.colored('#', 'AQUA')
+ print(line)
+ print(self.colored('#' * (ll + 6), 'AQUA'))
+ print()
+ print()
+
+ self.create_varfiles()
+ self.create_dcfile()
+ self.create_backend_file()
+ self.create_instance_files()
+
+ # --------------------------------------------------------------------------
+ def create_varfiles(self):
+
+ LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
+
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ if self.verbose > 1:
+ LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+ w='dcfile', v=vs_name))
+
+ vs_host = self.config.vsphere[vs_name].host
+ vs_user = self.config.vsphere[vs_name].user
+ vs_pwd = self.config.vsphere[vs_name].password
+ vs_dc = self.config.vsphere[vs_name].dc
+
+ rhsm_user = self.config.rhsm_user
+ rhsm_password = self.config.rhsm_password
+
+ content = textwrap.dedent('''\
+ ## filename: terraform.tfvars
+ ## This file declares the values for the variables to be used in the instance.tf playbook
+
+ #
+ # ATTENTION!
+ #
+            # To avoid annoying questions for passwords and API keys,
+            # create manually a file 'terraform-private.auto.tfvars'
+ # with the following content:
+ #
+ # vsphere_username = "<USERNAME>"
+ # vsphere_userpassword = "<PASSWORD>"
+ # rhsm_user_password = "<PASSWORD>"
+ #
+ # with the correct values. This file will not be under GIT control
+ #
+
+ ''')
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('terraform.tfvars', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('terraform.tfvars', self.std_file_permissions)
+
+        # Sensitive stuff
+ if vs_user or vs_pwd:
+            content = '# Private sensitive information. Please keep this file secret.\n\n'
+ if vs_user:
+ content += 'vsphere_username = "{}"\n'.format(vs_user)
+ if vs_pwd:
+ content += 'vsphere_userpassword = "{}"\n'.format(vs_pwd)
+ if rhsm_user:
+ content += 'rhsm_user_name = "{}"\n'.format(rhsm_user)
+ if rhsm_password:
+ content += 'rhsm_user_password = "{}"\n'.format(rhsm_password)
+ content += '\n'
+
+ LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
+
+ # File with variable declarations
+ content = textwrap.dedent('''\
+ # filename: variables.tf
+ # definition of the variables to be used in the play
+ # declaration happens in the file terraform.tfvars and private.auto.tfvars
+
+ ''')
+
+ tpl = textwrap.dedent('''\
+ variable "vsphere_vcenter" {{
+ default = "{}"
+ description = "IP or DNS of the vSphere center."
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(vs_host)
+
+ tpl = textwrap.dedent('''\
+ variable "vsphere_username" {
+ description = "vSphere accountname to be used."
+ type = string
+ }
+
+ variable "vsphere_userpassword" {
+ description = "Password for vSphere accountname."
+ type = string
+ }
+
+ ''')
+ content += tpl
+
+ tpl = textwrap.dedent('''\
+ variable "vsphere_datacenter" {{
+ default = "{dc}"
+ description = "Name of the vSphere datacenter to use."
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(dc=vs_dc)
+
+ tpl = textwrap.dedent('''\
+ variable "rhsm_user_name" {{
+ default = "{rhsm_user}"
+ description = "Username of the RedHat subscription management user."
+ type = string
+ }}
+
+ variable "rhsm_user_password" {{
+ description = "Password of the RedHat subscription management user."
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(rhsm_user=self.config.default_rhsm_user)
+
+ tpl = textwrap.dedent('''\
+ variable "timezone" {{
+ default = "{tz}"
+ description = "The global timezone used for VMs"
+ type = string
+ }}
+
+ ''')
+ content += tpl.format(tz=self.tz_name)
+
+ LOG.debug(_("Creating {!r} ...").format('variables.tf'))
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('variables.tf', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('variables.tf', self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_dcfile(self):
+
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ vsphere = self.vsphere[vs_name]
+
+ LOG.debug(_("Creating {!r} ...").format('dc.tf'))
+ if self.verbose > 1:
+ LOG.debug(_("Creating {w} for VSPhere {v!r} ...").format(
+ w='dcfile', v=vs_name))
+
+ content = textwrap.dedent('''\
+ # filename: dc.tf
+            # Configuring the VMware vSphere provider and some dependent commonly used objects
+
+ provider "vsphere" {
+ vsphere_server = var.vsphere_vcenter
+ user = var.vsphere_username
+ password = var.vsphere_userpassword
+ allow_unverified_ssl = true
+ ''')
+
+# if self.min_version_vsphere_provider:
+# content += ' version = ">= {}"\n'.format(
+# str(self.min_version_vsphere_provider))
+
+ content += textwrap.dedent('''\
+ }
+
+ data "vsphere_datacenter" "dc" {
+ name = var.vsphere_datacenter
+ }
+
+ ''')
+
+ for cluster in vsphere.clusters:
+ tpl = textwrap.dedent('''\
+ data "vsphere_resource_pool" "{pv}" {{
+ name = "{pn}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(
+ pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
+
+ if self.used_dc_clusters:
+ for dsc_name in sorted(self.used_dc_clusters[vs_name], key=str.lower):
+ dsc_tf_name = vsphere.ds_cluster_mapping[dsc_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_datastore_cluster" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=dsc_tf_name, n=dsc_name)
+
+ if self.used_datastores:
+ for ds_name in sorted(self.used_datastores[vs_name], key=str.lower):
+ ds_tf_name = vsphere.ds_mapping[ds_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_datastore" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=ds_tf_name, n=ds_name)
+
+ for net_name in sorted(self.used_networks[vs_name], key=str.lower):
+ net_tf_name = vsphere.network_mapping[net_name]
+ tpl = textwrap.dedent('''\
+ data "vsphere_network" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(n=net_name, tn=net_tf_name)
+
+ if self.vsphere_templates:
+ for tname in sorted(self.vsphere_templates[vs_name].keys(), key=str.lower):
+ tpl_tf_name = self.vsphere_templates[vs_name][tname].tf_name
+ tpl = textwrap.dedent('''\
+ data "vsphere_virtual_machine" "{tn}" {{
+ name = "{n}"
+ datacenter_id = data.vsphere_datacenter.dc.id
+ }}
+
+ ''')
+ content += tpl.format(tn=tpl_tf_name, n=tname)
+
+ tpl = textwrap.dedent('''\
+ data "vsphere_tag_category" "{cid}" {{
+ name = "{cname}"
+ }}
+
+ data "vsphere_tag" "{tid}" {{
+ name = "{tname}"
+ category_id = data.vsphere_tag_category.{cid}.id
+ }}
+
+ ''').format(
+ cid=self.config.vsphere_tag_cat_os_id,
+ cname=self.config.vsphere_tag_cat_os_name,
+ cdesc=self.config.vsphere_tag_cat_os_desc,
+ tid=self.config.vsphere_tag_os_rhel_id,
+ tname=self.config.vsphere_tag_os_rhel_name,
+ tdesc=self.config.vsphere_tag_os_rhel_desc,
+ )
+ content += tpl
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open('dc.tf', 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod('dc.tf', self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_backend_file(self):
+
+ file_name = 'backend.tf'
+ LOG.debug(_("Creating {!r} ...").format(file_name))
+
+ tpl = textwrap.dedent('''\
+            # Configuration of the backend for storing the Terraform state information
+            # and the minimum required version of Terraform
+
+ terraform {{
+ backend "consul" {{
+ address = "{host}"
+ scheme = "{scheme}"
+ path = "{prefix}/{project}"
+ }}
+ ''')
+
+ project = self.full_project_name
+ if not project:
+ project = self.project_name
+
+ content = tpl.format(
+ host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
+ prefix=self.config.tf_backend_path_prefix, project=project)
+
+ if self.min_version_terraform:
+ content += ' required_version = ">= {}"\n'.format(str(self.min_version_terraform))
+ else:
+ LOG.warn(_("No minimum version of Terraform defined."))
+
+ content += '}\n\n'
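+
+        # Illustrative result (hypothetical values): the rendered backend.tf
+        # looks roughly like
+        #
+        #   terraform {
+        #     backend "consul" {
+        #       address = "consul.example.com"
+        #       scheme = "https"
+        #       path = "terraform/customer/webshop"
+        #     }
+        #     required_version = ">= <minimal terraform version>"
+        #   }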
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open(file_name, 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod(file_name, self.std_file_permissions)
+
+ # --------------------------------------------------------------------------
+ def create_instance_files(self):
+
+ LOG.debug(_("Creating terraform files for VM instances."))
+
+ for vm in sorted(self.vms, key=lambda x: x.tf_name):
+ self.create_instance_file(vm)
+
+ # --------------------------------------------------------------------------
+ def create_instance_file(self, vm):
+
+ vs_name = vm.vsphere
+
+ fname = 'instance.' + vm.name + '.tf'
+ LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
+ f=fname, n=vm.name))
+
+ guest_id = self.config.guest_id
+ tpl_vm = None
+ if vm.vm_template:
+ tpl_vm = self.vsphere_templates[vs_name][vm.vm_template]
+ if self.verbose > 3:
+ LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
+ guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(tpl_vm.tf_name)
+ else:
+ guest_id = '"' + guest_id + '"'
+
+ content = self._create_instfile_general(vm, guest_id, tpl_vm)
+
+ i = 0
+ for iface in vm.interfaces:
+ content += self._create_instfile_if(vm, iface, i, tpl_vm)
+ i += 1
+
+ for disk_name in sorted(vm.disks.keys()):
+ content += self._create_instfile_disk(vm, disk_name)
+
+ content += self._create_instfile_custom(vm, tpl_vm)
+
+ if self.verbose > 1:
+ LOG.debug(_("Writing {!r}").format(fname))
+
+ if self.simulate:
+ if self.verbose:
+ print(content)
+ else:
+ with open(fname, 'w', **self.open_opts) as fh:
+ fh.write(content)
+ os.chmod(fname, self.std_file_permissions)
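+
+    # Each generated instance.<name>.tf file is assembled from four parts: the
+    # general VM resource attributes, one network_interface block per defined
+    # interface, one disk block per defined disk, and finally the clone /
+    # customize section including all provisioners.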
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_general(self, vm, guest_id, tpl_vm):
+
+ vs_name = vm.vsphere
+
+ # ## General definitions of VM
+ if self.verbose > 1:
+ LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
+ content = textwrap.dedent('''\
+ # Definition of the VM instance {!r}.
+
+ ''').format(vm.name)
+
+ cluster = self.vsphere[vs_name].get_cluster_by_name(vm.cluster)
+ if not cluster:
+ msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
+ vm.cluster)
+ raise RuntimeError(msg)
+
+ content += textwrap.dedent('''\
+ resource "vsphere_virtual_machine" "{tn}" {{
+
+ resource_pool_id = data.vsphere_resource_pool.{pv}.id
+ name = "{n}"
+ ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
+
+ if vm.ds_cluster:
+ dsc_tf_name = self.vsphere[vs_name].ds_cluster_mapping[vm.ds_cluster]
+ tpl = ' datastore_cluster_id = data.vsphere_datastore_cluster.{}.id\n'
+ content += tpl.format(dsc_tf_name)
+
+ if vm.datastore:
+ ds_tf_name = self.vsphere[vs_name].ds_mapping[vm.datastore]
+ tpl = ' datastore_id = data.vsphere_datastore.{}.id\n'
+ content += tpl.format(ds_tf_name)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ num_cpus = "{cpu}"
+ folder = "{f}"
+ num_cores_per_socket = "1"
+ cpu_hot_add_enabled = "true"
+ cpu_hot_remove_enabled = "true"
+ memory = "{m}"
+ memory_hot_add_enabled = "true"
+ boot_delay = "{b}"
+ guest_id = {g}
+ scsi_controller_count = "{c_count}"
+ '''), ' ').format(
+ g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000),
+ c_count=vm.disks.get_ctrlr_count())
+ if vm.vm_template:
+ tpl = ' scsi_type = data.vsphere_virtual_machine.{}.scsi_type\n'
+ content += tpl.format(tpl_vm.tf_name)
+ content += ' enable_disk_uuid = "true"\n\n'
+
+ if vm.is_rhel:
+ content += ' tags = [\n data.vsphere_tag.{}.id\n ]\n\n'.format(
+ self.config.vsphere_tag_os_rhel_id)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ lifecycle {
+ ignore_changes = all
+ }
+ '''), ' ')
+ content += '\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_if(self, vm, iface, i, tpl_vm):
+
+ vs_name = vm.vsphere
+
+ # ## Interface definition
+
+ if self.verbose > 1:
+ LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
+ nw = iface.network
+ nw_name = self.vsphere[vs_name].network_mapping[nw]
+
+ content = textwrap.indent(textwrap.dedent('''\
+ network_interface {{
+ network_id = data.vsphere_network.{n}.id
+ adapter_type = data.{vvm}.{t}.{nit}[0]
+ }}
+ '''), ' ').format(
+ n=nw_name, t=tpl_vm.tf_name,
+ vvm='vsphere_virtual_machine', nit='network_interface_types')
+ content += '\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_disk(self, vm, disk_name):
+
+ # ## Disk definitions
+ if self.verbose > 1:
+ LOG.debug(_("Generating disk definition {n} of {v!r}.").format(n=disk_name, v=vm.name))
+ disk = vm.disks[disk_name]
+ content = textwrap.indent(textwrap.dedent('''\
+ disk {{
+ label = "{n}"
+ size = "{s}"
+ eagerly_scrub = "false"
+ thin_provisioned = "false"
+ unit_number = {i}
+ '''), ' ').format(n=disk_name, i=disk.unit_number, s=int(disk.size_gb))
+
+ content += ' }\n\n'
+
+ return content
+
+ # --------------------------------------------------------------------------
+ def _create_instfile_custom(self, vm, tpl_vm):
+
+ # ## Customization of VM
+ if self.verbose > 1:
+ LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
+
+ content = textwrap.indent(textwrap.dedent('''\
+ clone {{
+ template_uuid = data.vsphere_virtual_machine.{t}.id
+
+ customize {{
+ linux_options {{
+ host_name = "{h}"
+ domain = "{d}"
+ time_zone = var.timezone
+ }}
+
+ '''), ' ').format(
+ t=tpl_vm.tf_name, h=vm.hostname, d=vm.domain)
+
+ content += self._create_instfile_nw(vm)
+ content += ' }\n'
+ content += ' }\n\n'
+
+ # ## local SSH cleanup before any actions
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {h} || true"
+ }}
+
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {i} || true"
+ }}
+
+ '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+ # ## Copying postinstall scripts to VM
+
+ files = ['functions.rc', 'conf-resolver', 'create-motd']
+ if vm.is_rhel:
+ files.append('register-rhel')
+ files.append('update-networkmanager')
+ if vm.has_puppet:
+ files.append('init-puppet')
+ files.append('update-all-packages')
+
+ for sname in files:
+
+ if self.verbose > 1:
+ LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
+ f=sname, v=vm.name))
+
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "file" {{
+ source = "{d}/{f}"
+ destination = "/tmp/{f}"
+ connection {{
+ type = "ssh"
+ host = "{h}"
+ user = "root"
+ private_key = file("{k}")
+ agent = "false"
+ }}
+ }}
+
+ '''), ' ').format(
+ d=self.script_dir_rel, f=sname, h=vm.fqdn, k=self.private_key_rel)
+
+ if vm.is_rhel:
+ if self.verbose > 1:
+ LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
+ f='rhsm-user-passwd', v=vm.name))
+
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "file" {{
+ destination = "/tmp/rhsm-user-passwd"
+ content = "${{var.rhsm_user_password}}"
+ connection {{
+ type = "ssh"
+ host = "{h}"
+ user = "root"
+ private_key = file("{k}")
+ agent = "false"
+ }}
+ }}
+
+ '''), ' ').format(h=vm.fqdn, k=self.private_key_rel)
+
+ # ## Postinstall commands on host
+ commands = []
+
+ commands.append("usermod -c 'root {}' root".format(vm.fqdn))
+
+ commands.append("chmod +x /tmp/conf-resolver")
+ cmd = '/tmp/conf-resolver'
+ for ns in vm.nameservers:
+ cmd += ' --ns {!r}'.format(str(ns))
+ for dom in vm.searchdomains:
+ cmd += ' --search {!r}'.format(dom)
+ if vm.dns_options:
+ cmd += ' --options {!r}'.format(vm.dns_options)
+ else:
+ cmd += ' --options {!r}'.format('')
+ commands.append(cmd)
+ commands.append("rm -fv /tmp/conf-resolver")
+
+ purpose = self.re_doublequote.sub('\\\"', vm.purpose)
+
+ zone = "{z}/{c}".format(z=vm.vsphere, c=vm.cluster)
+
+ commands.append("chmod +x /tmp/create-motd")
+ cmd = (
+ "/tmp/create-motd --purpose '{p}' --hardware 'vmware (x86_64)' --owner '{o}' "
+ "--location 'VMWare' --zone '{z}' --customer '{c}' --email '{m}' --tier '{t}' "
+ "--environment '{e}' --role '{r}'").format(
+ p=purpose, t=vm.puppet_tier, o=vm.customer, z=zone, c=vm.puppet_customer,
+ m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+ if vm.puppet_project:
+ cmd += " --project '{pr}'".format(pr=vm.puppet_project)
+ cmd += " | tee /etc/motd"
+ commands.append(cmd)
+ commands.append("rm -fv /tmp/create-motd")
+
+        # ## Registering RHEL with RedHat Subscription Management
+ if vm.is_rhel:
+ commands.append("chmod +x /tmp/register-rhel")
+ commands.append("/tmp/register-rhel -v -U '${var.rhsm_user_name}'")
+ commands.append("rm -fv /tmp/rhsm-user-passwd /tmp/register-rhel")
+
+ # ## Configuring and starting puppet
+ if vm.has_puppet:
+ commands.append("chmod +x /tmp/init-puppet")
+ cmd = "/tmp/init-puppet --environment '{e}' --customer '{c}' "
+ if vm.puppet_project:
+ cmd += "--project '{pr}' "
+ cmd += "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p}' --email '{m}'"
+ cmd += " --zone '{z}'"
+ # if vm.puppet_initial_install:
+ # cmd += " --initial-install"
+ cmd = cmd.format(
+ p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer, z=zone,
+ pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role)
+ commands.append(cmd)
+ commands.append("rm -fv /tmp/init-puppet")
+
+ content += ' provisioner "remote-exec" {\n'
+ content += ' inline = [\n'
+ for cmd in commands:
+ content += ' "{}",\n'.format(cmd)
+ content += ' ]\n'
+ content += ' connection {\n'
+ content += ' type = "ssh"\n'
+ content += ' host = "{}"\n'.format(vm.fqdn)
+ content += ' user = "root"\n'
+ content += ' private_key = file("{}")\n'.format(self.private_key_rel)
+ content += ' agent = "false"\n'
+ content += ' }\n'
+ content += ' }\n\n'
+
+ # ## postconfigure actions with puppet
+ if vm.has_puppet:
+ content += self._create_instfile_puppet(vm)
+ else:
+ content += self._only_update_packages(vm)
+
+        # ## Unregistering from RedHat Subscription Management
+ cmd = ('if [ -x /sbin/subscription-manager ] ; then '
+ '/sbin/subscription-manager unregister --no-progress-messages; fi || true')
+ if vm.is_rhel:
+ content += ' provisioner "remote-exec" {\n'
+ content += ' inline = [\n'
+ content += ' "{}"\n'.format(cmd)
+ content += ' ]\n'
+ content += ' when = destroy\n'
+ content += ' connection {\n'
+ content += ' type = "ssh"\n'
+ content += ' host = "{}"\n'.format(vm.fqdn)
+ content += ' user = "root"\n'
+ content += ' }\n'
+ content += ' }\n\n'
+
+ # ## local SSH cleanup on destroy
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {h} || true"
+ when = destroy
+ }}
+
+ provisioner "local-exec" {{
+ command = "ssh-keygen -R {i} || true"
+ when = destroy
+ }}
+ '''), ' ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+ content += '}\n\n'
+
+ return content
+
+    # -------------------------------------------------------------------------
+ def _create_instfile_nw(self, vm):
+
+ content = ''
+
+ gw4 = None
+ gw6 = None
+ for iface in vm.interfaces:
+
+ content += " network_interface {\n"
+ if iface.address_v4:
+ content += ' ipv4_address = "{}"\n'.format(iface.address_v4)
+ if iface.netmask_v4 is not None:
+ content += ' ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
+ if iface.address_v6:
+ content += ' ipv6_address = "{}"\n'.format(iface.address_v6)
+ if iface.netmask_v6 is not None:
+ content += ' ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
+ content += ' }\n\n'
+
+ if not gw4:
+ gw4 = iface.gateway_v4
+ if not gw6:
+ gw6 = iface.gateway_v6
+
+ if gw4:
+ content += ' ipv4_gateway = "{}"\n'.format(gw4)
+ if gw6:
+ content += ' ipv6_gateway = "{}"\n'.format(gw6)
+
+ ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
+ content += ' dns_server_list = [{}]\n'.format(ns)
+
+ return content
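+
+    # Illustrative rendered output (hypothetical addresses) of the customize
+    # network settings above:
+    #
+    #     network_interface {
+    #       ipv4_address = "192.0.2.10"
+    #       ipv4_netmask = "24"
+    #     }
+    #
+    #     ipv4_gateway = "192.0.2.1"
+    #     dns_server_list = ["192.0.2.53"]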
+
+    # -------------------------------------------------------------------------
+ def _create_instfile_puppet(self, vm):
+
+ ca_cmd = (
+ "ssh -o StrictHostKeyChecking=no {ca} "
+ "'sudo /opt/puppetlabs/bin/puppetserver ca sign --certname {h} || true'").format(
+ ca=self.config.puppetca, h=vm.fqdn)
+
+ command_list = [
+ "/opt/puppetlabs/bin/puppet agent --test || true",
+ "/usr/bin/systemctl start puppet.service",
+ "/usr/bin/systemctl enable puppet.service",
+ "chmod +x /tmp/update-all-packages",
+ "/tmp/update-all-packages",
+ "rm -fv /tmp/update-all-packages",
+ ]
+ if vm.is_rhel:
+ command_list.append("chmod +x /tmp/update-networkmanager")
+ command_list.append("/tmp/update-networkmanager -v")
+ command_list.append("rm -fv /tmp/update-networkmanager")
+ command_list.append("rm -fv /tmp/functions.rc")
+        commands = ',\n          '.join(map(lambda x: '"' + x + '"', command_list))
+
+ content = textwrap.indent(textwrap.dedent('''\
+ provisioner "local-exec" {{
+ command = "{ca_cmd}"
+ }}
+
+ provisioner "remote-exec" {{
+ inline = [
+ {commands},
+ ]
+ connection {{
+ type = "ssh"
+ host = "{h}"
+ user = "root"
+ private_key = file("{k}")
+ agent = "false"
+ }}
+ }}
+
+ '''), ' ').format(ca_cmd=ca_cmd, commands=commands, h=vm.fqdn, k=self.private_key_rel)
+
+ # Destroy actions with puppet
+ cmd1 = "ssh -o StrictHostKeyChecking=no {ma} "
+ cmd1 += "'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
+ cmd1 = cmd1.format(ma=self.config.puppetmaster, h=vm.fqdn)
+
+ cmd2 = "ssh -o StrictHostKeyChecking=no {ca} "
+ cmd2 += "'sudo /opt/puppetlabs/bin/puppetserver ca clean --certname {h} || true'"
+ cmd2 = cmd2.format(ca=self.config.puppetca, h=vm.fqdn)
+
+ content += textwrap.indent(textwrap.dedent('''\
+ provisioner "remote-exec" {{
+ inline = [
+ "/usr/bin/systemctl stop puppet.service || true",
+ ]
+ when = destroy
+ connection {{
+ type = "ssh"
+ host = "{h}"
+ user = "root"
+ }}
+ }}
+
+ provisioner "local-exec" {{
+ command = "{cmd1}"
+ when = destroy
+ }}
+
+ provisioner "local-exec" {{
+ command = "{cmd2}"
+ when = destroy
+ }}
+
+ '''), ' ').format(cmd1=cmd1, cmd2=cmd2, h=vm.fqdn, k=self.private_key_rel)
+
+ return content
+
+    # -------------------------------------------------------------------------
+ def _only_update_packages(self, vm):
+
+ command_list = [
+ "chmod +x /tmp/update-all-packages",
+ "/tmp/update-all-packages",
+ "rm -fv /tmp/update-all-packages",
+ ]
+ if vm.is_rhel:
+ command_list.append("chmod +x /tmp/update-networkmanager")
+ command_list.append("/tmp/update-networkmanager -v")
+ command_list.append("rm -fv /tmp/update-networkmanager")
+ command_list.append("rm -fv /tmp/functions.rc")
+ commands=',\n '.join(map( lambda x: '"' + x + '"', command_list))
+
+ content = textwrap.indent(textwrap.dedent('''\
+ provisioner "remote-exec" {{
+ inline = [
+ {commands},
+ ]
+ connection {{
+ type = "ssh"
+ host = "{h}"
+ user = "root"
+ private_key = file("{k}")
+ agent = "false"
+ }}
+ }}
+
+ '''), ' ').format(commands=commands, h=vm.fqdn, k=self.private_key_rel)
+
+ return content
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A mixin for the handler module providing methods used early in execution.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+
+from distutils.version import LooseVersion
+
+from subprocess import PIPE
+
+# Third party modules
+from fb_tools.errors import HandlerError, ExpectedHandlerError, CommandNotFoundError
+
+from fb_pdnstools.server import PowerDNSServer
+from fb_pdnstools.errors import PowerDNSHandlerError
+
+# Own modules
+from ..config import CrTfConfiguration
+
+from ..errors import AbortExecution
+
+from ..terraform.vm import TerraformVm
+
+from ..terraform.disk import TerraformDisk
+
+from ..xlate import XLATOR
+
+__version__ = '0.1.0'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerFirstMixin():
+ """A Mixin module for the handler module for early used methods."""
+
+ # -------------------------------------------------------------------------
+ def incr_verbosity(self, diff=1):
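+        """Increase the verbosity of this handler and of its sub-handlers."""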
+
+ new_verbose = self.verbose + int(diff)
+ if new_verbose < 0:
+ new_verbose = 0
+ self.verbose = new_verbose
+
+ if self.pdns:
+ self.pdns.verbose = self.verbose
+
+ for vname in self.vsphere:
+ self.vsphere[vname].verbose = self.verbose
+
+ # -------------------------------------------------------------------------
+ def init_handlers(self):
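+        """Initialize the Terraform command, the disk limits and the PowerDNS handler."""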
+
+ if not self.config:
+ msg = _("No configuration given before initialisation of handlers.")
+ raise HandlerError(msg)
+
+ if not isinstance(self.config, CrTfConfiguration):
+ raise HandlerError(_(
+ "{n} is not a {e}-instance, but a {w}-instance instead.").format(
+ n='self.config', e='CrTfConfiguration', w=self.config.__class__.__name__))
+
+ TerraformDisk.default_size = self.config.disk_size
+ TerraformDisk.min_size_gb = self.config.disk_min_size
+ TerraformDisk.max_size_gb = self.config.disk_max_size
+
+ TerraformVm.min_rootdisk_size = self.config.root_min_size
+ TerraformVm.max_rootdisk_size = self.config.root_max_size
+
+ LOG.info(_("Initialize some additional handlers."))
+
+ self.terraform_cmd = self.get_command('terraform', quiet=True)
+ if not self.terraform_cmd:
+ raise CommandNotFoundError('terraform')
+ self.check_terraform_version()
+
+ self.pdns = PowerDNSServer(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ master_server=self.config.pdns_master_server,
+ port=self.config.pdns_api_port, key=self.config.pdns_api_key,
+ use_https=self.config.pdns_api_use_https, path_prefix=self.config.pdns_api_path_prefix,
+ simulate=self.simulate, force=self.force, initialized=True,
+ )
+
+ if not self.config.no_pdns:
+ try:
+ api_version = self.pdns.get_api_server_version() # noqa
+ except (PowerDNSHandlerError, ConnectionError) as e:
+ msg = "{c}: {e}".format(c=e.__class__.__name__, e=str(e))
+ raise ExpectedHandlerError(msg)
+
+ # -------------------------------------------------------------------------
+ def check_terraform_version(self):
+ """ Checking, that the called terraform has a minimum version."""
+
+ tf_timeout = 10
+
+ got_tf_version = None
+ LOG.info(_("Checking the terraform version ..."))
+
+ cmd = [str(self.terraform_cmd), 'version']
+ cmd_str = ' '.join(cmd)
+ LOG.debug(_("Executing {!r} ...").format(cmd_str))
+ result = self.run(
+ cmd, may_simulate=False, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+ LOG.debug(_("Completed process:") + "\n" + str(result))
+
+ if not result.stdout:
+ msg = _("No output on command {!r}.").format(cmd_str)
+ raise ExpectedHandlerError(msg)
+ lines = result.stdout.splitlines()
+
+ if self.verbose > 2:
+ LOG.debug(_("First line:") + '\n' + lines[0])
+ match = self.re_tf_version.search(lines[0])
+ if not match:
+ msg = _("Could not evaluate version output of terraform:") + '\n' + result.stdout
+ raise ExpectedHandlerError(msg)
+
+ got_tf_version = LooseVersion(match.group(1))
+ LOG.info(_("Terraform version: {!r}.").format(str(got_tf_version)))
+
+ if self.min_version_terraform:
+ LOG.debug(_("Checking for {o}{m!r} ...").format(
+ o='>=', m=str(self.min_version_terraform)))
+ if got_tf_version < self.min_version_terraform:
+ msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
+ c=str(got_tf_version), o='>=', m=str(self.min_version_terraform))
+ raise ExpectedHandlerError(msg)
+
+ if self.max_version_terraform:
+ LOG.debug(_("Checking for {o}{m!r} ...").format(
+ o='<=', m=str(self.max_version_terraform)))
+ if got_tf_version > self.max_version_terraform:
+ msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
+ c=str(got_tf_version), o='<=', m=str(self.max_version_terraform))
+ raise ExpectedHandlerError(msg)
+
+ # -------------------------------------------------------------------------
+ def first_call(self, yaml_file):
+ """First steps until reading the YAML file."""
+
+ if not self.initialized:
+ raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+ try:
+
+ self.exec_init_run()
+
+ LOG.info(_("Go ahead..."))
+
+ self.exec_read_yaml(yaml_file)
+
+ print()
+ LOG.info(_("Initialising VSPhere handlers."))
+ self.init_vspheres(yaml_file)
+
+ return True
+
+ except AbortExecution as e:
+ LOG.warn(str(e))
+ return False
+
+    # -------------------------------------------------------------------------
+ def exec_init_run(self):
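+        """Execute the 'init' step and abort afterwards if it is the stop step."""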
+
+ if self.stop_at_step == 'init':
+ self.incr_verbosity()
+
+ if self.verbose > 2:
+ LOG.debug(_("Current {} object:").format(self.__class__.__name__) + "\n" + str(self))
+
+ LOG.info(_("Finished step {!r}.").format('init'))
+ if self.stop_at_step == 'init':
+ raise AbortExecution('init')
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A mixin for the handler class providing methods for reading and evaluating YAML files.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+
+# Third party modules
+import yaml
+import six
+
+from fb_tools.common import pp, to_bool
+from fb_tools.errors import ExpectedHandlerError
+
+# Own modules
+from ..errors import AbortExecution
+
+from ..terraform.vm import TerraformVm
+
+from ..xlate import XLATOR
+
+__version__ = '0.1.1'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerReadMixin():
+ """A mixin module for the handler module for reading and evaluation."""
+
+    # -------------------------------------------------------------------------
+ def exec_read_yaml(self, yaml_file):
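+        """Execute the 'read-yaml' step: read and evaluate the given YAML file."""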
+
+ if self.stop_at_step == 'read-yaml':
+ self.incr_verbosity()
+
+ self.read_yaml_data(yaml_file)
+ self.eval_yaml_data()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in evaluation of YAML data of {f!r}.",
+ "Found {n} errors in evaluation of YAML data of {f!r}.",
+ self.eval_errors).format(n=self.eval_errors, f=str(yaml_file))
+ raise ExpectedHandlerError(msg)
+
+ if not self.vms:
+ msg = _("Did not found any VMs to deploy in file {!r}.").format(str(yaml_file))
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('read-yaml'))
+ if self.stop_at_step == 'read-yaml':
+ raise AbortExecution('read-yaml')
+
+    # -------------------------------------------------------------------------
+ def read_yaml_data(self, yaml_file):
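+        """Read and parse the given YAML file into self.yaml_data."""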
+
+ LOG.info(_("Reading YAML file {!r} ...").format(str(yaml_file)))
+
+ open_opts = {}
+ if six.PY3 and self.config.encoding:
+ open_opts['encoding'] = self.config.encoding
+ open_opts['errors'] = 'surrogateescape'
+
+ try:
+ with open(str(yaml_file), 'r', **open_opts) as fh:
+ self.yaml_data = yaml.full_load(fh)
+ except yaml.YAMLError as e:
+ msg = _("Error in YAML file {f!r}: {e}.").format(
+ f=str(yaml_file), e=e)
+ if hasattr(e, 'problem_mark'):
+ mark = e.problem_mark
+ msg += " " + _("Error position: {li}:{c}").format(
+ li=mark.line + 1, c=mark.column + 1)
+ raise ExpectedHandlerError(msg)
+
+ if self.verbose > 2:
+ LOG.debug(_("Read data from YAML file:") + "\n" + pp(self.yaml_data))
+
+ if not isinstance(self.yaml_data, dict):
+ msg = _(
+ "Data read from YAML file {f!r} are not a dictionary, "
+ "but a {c} object instead.").format(
+ f=str(yaml_file), c=self.yaml_data.__class__.__name__)
+ raise ExpectedHandlerError(msg)
+
+ for key in self.yaml_data.keys():
+ if key.lower() == 'simulate':
+ self.simulate = to_bool(self.yaml_data[key])
+
+    # -------------------------------------------------------------------------
+ def eval_yaml_data(self):
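+        """Evaluate the YAML data for a default VM, VM definitions and VM groups."""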
+
+ self.vm_names = []
+
+ # Searching for default VM definition
+ LOG.debug(_("Searching for default VM definition ..."))
+ for key in self.yaml_data.keys():
+
+ if self.re_default.match(key):
+ vm = self._eval_tpl_vm(name='Default VM', vm_def=self.yaml_data[key])
+ if vm:
+ self.default_vm = vm
+
+ # Searching for VM definitions
+ LOG.debug(_("Searching for VM definitions ..."))
+ for key in self.yaml_data.keys():
+ if self.re_vm_key.match(key):
+ for vm_def in self.yaml_data[key]:
+ vm = self._eval_vm(vm_def, template_vm=self.default_vm)
+ if vm:
+ self.vms.append(vm)
+
+ # Searching for groups
+ for key in self.yaml_data.keys():
+ if self.re_group.match(key):
+ self._eval_vm_groups(self.yaml_data[key], template_vm=self.default_vm, depth=1)
+
+ if self.verbose > 2:
+ vm_list = []
+ for vm in self.vms:
+ vm_list.append(vm.as_dict())
+ LOG.debug(_("Evaluated VMs:") + "\n" + pp(vm_list))
+
+    # -------------------------------------------------------------------------
+ def _eval_tpl_vm(self, name, vm_def, template_vm=None):
+
+ try:
+ vm = TerraformVm.from_def(
+ vm_def, name=name, is_template=True, template_vm=template_vm, appname=self.appname,
+ verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+ force=self.force, terminal_has_colors=self.terminal_has_colors)
+ except Exception as e:
+ if self.verbose > 2:
+ self.handle_error(str(e), e.__class__.__name__, True)
+ else:
+ LOG.error(_("{c} in evaluating template VM: {e}").format(
+ c=e.__class__.__name__, e=e))
+ self.eval_errors += 1
+ return None
+
+ if self.verbose > 2:
+ LOG.debug(_(
+ "Defined Terraform Template VM {n!r}:").format(
+ n=vm.name) + "\n" + pp(vm.as_dict()))
+
+ return vm
+
+    # -------------------------------------------------------------------------
+ def _eval_vm(self, vm_def, template_vm=None):
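+        """Create a TerraformVm object from the given VM definition."""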
+
+ try:
+ vm = TerraformVm.from_def(
+ vm_def, is_template=False, template_vm=template_vm, appname=self.appname,
+ verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+ force=self.force, terminal_has_colors=self.terminal_has_colors)
+ except Exception as e:
+ if self.verbose > 2:
+ self.handle_error(str(e), e.__class__.__name__, True)
+ else:
+ LOG.error(_("{c} in evaluating VM: {e}").format(c=e.__class__.__name__, e=e))
+ self.eval_errors += 1
+ return None
+
+ if self.verbose > 3:
+ LOG.debug(_(
+ "Defined Terraform-VM {n!r}:").format(n=vm.name) + "\n" + pp(vm.as_dict()))
+
+ if vm.name in self.vm_names:
+ LOG.error(_("VM {!r} is already defined.").format(vm.name))
+ self.eval_errors += 1
+            return None
+
+        # Remember the name to detect duplicate definitions.
+        self.vm_names.append(vm.name)
+        return vm
+
+    # -------------------------------------------------------------------------
+ def _eval_vm_groups(self, groups_def, template_vm=None, depth=1):
+
+ if not isinstance(groups_def, list):
+ msg = _("Group definition list is not a list:") + "\n" + pp(groups_def)
+ LOG.error(msg)
+ self.eval_errors += 1
+ return
+
+ if depth >= self.max_groups_depth:
+ LOG.warn(_("Maximum recursion depth for VM groups of {} reached.").format(depth))
+ return
+
+ if self.verbose > 2:
+ LOG.debug(_("Evaluating group list:") + "\n" + pp(groups_def))
+ if self.verbose > 3:
+ LOG.debug(_("Used template: {!r}").format(template_vm))
+
+ for group_def in groups_def:
+ self._eval_vm_group(group_def, template_vm=template_vm, depth=depth)
+
+    # -------------------------------------------------------------------------
+ def _eval_vm_group(self, group_def, template_vm=None, depth=1):
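+        """Evaluate a single VM group with its default VM, its VMs and nested groups."""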
+
+ if not isinstance(group_def, dict):
+ msg = _("VM definition is not a dictionary:") + "\n" + pp(group_def)
+ LOG.error(msg)
+ self.eval_errors += 1
+ return
+
+ group_template = template_vm
+ group_name = None
+
+        # Searching for the group name ...
+ for key in group_def.keys():
+ if self.re_group_name.match(key) and str(group_def[key]).strip():
+ group_name = str(group_def[key]).strip()
+
+ if not group_name:
+ LOG.error(_("No group name defined."))
+ return
+
+ # Searching for group default VM definition
+ LOG.debug(_("Searching for group default VM definition in group {!r} ...").format(
+ group_name))
+ for key in group_def.keys():
+
+ if self.re_default.match(key):
+ vm_name = 'Default VM group {!r}'.format(group_name)
+ vm = self._eval_tpl_vm(
+ name=vm_name, vm_def=group_def[key], template_vm=template_vm)
+ if vm:
+ group_template = vm
+ break
+
+ n = None
+ if group_template:
+ n = group_template.name
+ LOG.debug(_("Used template for creating VMs in group {g!r}: {n!r}").format(
+ g=group_name, n=n))
+ if self.verbose > 3:
+ LOG.debug(_("Used template structure:") + "\n" + pp(group_template.as_dict()))
+
+ # Searching for VM definitions
+ LOG.debug(_("Searching for VM definitions in group {!r} ...").format(group_name))
+ for key in group_def.keys():
+ if self.re_vm_key.match(key):
+ for vm_def in group_def[key]:
+ vm = self._eval_vm(vm_def, template_vm=group_template)
+ if vm:
+ self.vms.append(vm)
+
+ # Searching for nested groups
+ for key in group_def.keys():
+ if self.re_group.match(key):
+ self._eval_vm_groups(
+ group_def[key], template_vm=group_template, depth=depth + 1)
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A mixin for the handler class providing methods for interacting with VMware/vSphere.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import copy
+import logging
+import os
+import re
+import sys
+
+from pathlib import Path
+
+from operator import attrgetter
+
+# Third party modules
+from fb_tools.common import pp
+from fb_tools.errors import HandlerError, ExpectedHandlerError
+from fb_vmware.errors import VSphereExpectedError
+from fb_vmware.config import VSPhereConfigInfo
+from fb_vmware.connect import VsphereConnection
+
+# Own modules
+from ..errors import AbortExecution
+
+from ..xlate import XLATOR
+
+__version__ = '0.1.3'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfHandlerVmwMixin():
+ """A mixin module for the handler module for interacting with VMware/VSphere.."""
+
+    # -------------------------------------------------------------------------
+ def exec_collect_folders(self, yaml_file):
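+        """Collect the vSphere VM folders and evaluate project name and directories."""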
+
+ if self.stop_at_step == 'collect-folders':
+ self.incr_verbosity()
+
+ LOG.info(_("Collecting all VMWare and local folders ..."))
+ LOG.info(_("Get vSphere datacenter ..."))
+ for vname in self.vsphere:
+ self.vsphere[vname].get_datacenter()
+
+ LOG.debug(_("Collecting vSphere folders."))
+ self.vsphere_folders = []
+ for vm in self.vms:
+ if vm.folder:
+ if vm.folder not in self.vsphere_folders:
+ self.vsphere_folders.append(vm.folder)
+ self.vsphere_folders.sort(key=str.lower)
+ LOG.debug(_("Collected vSphere folders:") + "\n" + pp(self.vsphere_folders))
+
+ # Set project name and directory
+ yfile = Path(yaml_file)
+ yfile_base = yfile.name
+ yfile_dir = yfile.parent.resolve()
+ (yfile_stem, yfile_ext) = os.path.splitext(yfile_base)
+ self.project_name = yfile_stem
+ LOG.info(_("Project name is {!r}.").format(str(self.project_name)))
+ self.project_dir = yfile_dir / yfile_stem
+ LOG.info(_("Project directory is: {!r}.").format(str(self.project_dir)))
+
+ # Evaluating root terraform directory
+ if not self.is_venv:
+ i = 4
+ cdir = copy.copy(self.project_dir).parent
+ while i > 0:
+ git_dir = cdir / '.git'
+ if git_dir.is_dir():
+ self._terraform_root_dir = cdir
+ break
+ i -= 1
+ if cdir == cdir.parent:
+ break
+ cdir = cdir.parent
+ if not self._terraform_root_dir:
+ msg = _("Did not found root terraform directory above {!r}.").format(
+ str(self.project_dir))
+ LOG.warn(msg)
+
+ LOG.info(_("Full project name: {!r}").format(self.full_project_name))
+
+ LOG.info(_("Finished step {!r}.").format('collect-folders'))
+ if self.stop_at_step == 'collect-folders':
+ raise AbortExecution('collect-folders')
+
+    # -------------------------------------------------------------------------
+ def init_vspheres(self, yaml_file):
+
+ if self.stop_at_step == 'vmw-init':
+ self.incr_verbosity()
+
+ LOG.debug(_("Initialize VSPhere ..."))
+ # Test for multiple VSphere references
+ found_vspheres = []
+ for vm in self.vms:
+ vname = vm.vsphere
+ if vname not in found_vspheres:
+ found_vspheres.append(vname)
+ if len(found_vspheres) > 1:
+ yaml_file_rel = os.path.relpath(str(yaml_file), os.getcwd())
+ msg = _("There is only one, unique VSPhere definition allowed in a project file.")
+ msg += '\n'
+ msg += _("In {f!r} were found {nr} different VSPhere definitions:").format(
+ f=yaml_file_rel, nr=len(found_vspheres))
+ for vname in sorted(found_vspheres, key=str.lower):
+ msg += '\n * {!r}'.format(vname)
+ raise ExpectedHandlerError(msg)
+
+ self._init_vspheres()
+
+ LOG.info(_("Finished step {!r}.").format('vmw-init'))
+ if self.stop_at_step == 'vmw-init':
+ raise AbortExecution('vmw-init')
+
+    # -------------------------------------------------------------------------
+ def _init_vspheres(self):
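+        """Create a VsphereConnection object for each vSphere used by the defined VMs."""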
+
+ for vm in self.vms:
+ if vm.vsphere in self.vsphere:
+ continue
+ vname = vm.vsphere
+ LOG.debug(_("Initializing VSphere {!r} ...").format(vname))
+ if vname not in self.config.vsphere:
+ msg = _("VSPhere {!r} not defined in configuration.").format(vname)
+ raise ExpectedHandlerError(msg)
+
+ if not self.vsphere_user and self.config.vsphere[vname].user:
+ LOG.debug(_("Setting {st} to {what!r}.").format(
+ st='handler.vsphere_user', what=self.config.vsphere[vname].user))
+ self.vsphere_user = self.config.vsphere[vname].user
+ if not self.vsphere_password and self.config.vsphere[vname].password:
+ LOG.debug(_("Setting {}.").format('handler.vsphere_password'))
+ self.vsphere_password = self.config.vsphere[vname].password
+
+ try:
+ params = {
+ 'appname': self.appname,
+ 'verbose': self.verbose,
+ 'base_dir': self.base_dir,
+ 'simulate': self.simulate,
+ 'force': self.force,
+ 'terminal_has_colors': self.terminal_has_colors,
+ 'initialized': True,
+ }
+ show_params = copy.copy(params)
+
+ connect_info = VSPhereConfigInfo(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ host=self.config.vsphere[vname].host, port=self.config.vsphere[vname].port,
+ dc=self.config.vsphere[vname].dc, user=self.vsphere_user,
+ password=self.vsphere_password, initialized=True)
+
+ params['connect_info'] = connect_info
+ show_params['connect_info'] = connect_info.as_dict()
+
+ if self.verbose > 1:
+ if self.verbose < 5:
+ show_params['connect_info']['password'] = '******'
+ msg = _("Initialising a {}-object with params:").format('VsphereConnection')
+ msg += '\n' + pp(show_params)
+ LOG.debug(msg)
+
+ vsphere = VsphereConnection(**params)
+ self.vsphere[vname] = vsphere
+
+ except VSphereExpectedError as e:
+ raise ExpectedHandlerError(str(e))
+
+    # -------------------------------------------------------------------------
+ def test_vsphere_handlers(self):
+
+ if self.stop_at_step == 'vmw-test':
+ self.incr_verbosity()
+
+ for vname in self.vsphere.keys():
+
+ try:
+
+ vsphere = self.vsphere[vname]
+
+ vsphere.get_about()
+ if self.verbose > 2:
+ msg = _("Created {}-object:").format('VsphereConnection')
+ msg += '\n' + pp(vsphere.as_dict())
+ LOG.debug(msg)
+
+ except VSphereExpectedError as e:
+ raise ExpectedHandlerError(str(e))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-test'))
+ if self.stop_at_step == 'vmw-test':
+ raise AbortExecution('vmw-test')
+
+    # -------------------------------------------------------------------------
+ def assign_default_vmw_values(self):
+ """Assigning not defined templates and clusters of VMs by their
+ appropriate default values."""
+
+ LOG.debug(_(
+ "Assigning not defined templates and clusters of VMs by their "
+ "appropriate default values."))
+
+ for vm in self.vms:
+
+ if not vm.cluster:
+ cl = self.config.vsphere[vm.vsphere].cluster
+ if self.verbose > 1:
+ LOG.debug(_("Setting cluster of {n!r} to {c!r} ...").format(
+ n=vm.name, c=cl))
+ vm.cluster = cl
+
+ if not vm.vm_template:
+ tpl = self.config.vsphere[vm.vsphere].template_name
+ if self.verbose > 1:
+ LOG.debug(_("Setting template of {n!r} to {t!r} ...").format(
+ n=vm.name, t=tpl))
+ vm.vm_template = tpl
+
+    # -------------------------------------------------------------------------
+ def exec_vmw_clusters(self):
+
+ if self.stop_at_step == 'vmw-clusters':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for clusters in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_clusters()
+
+ LOG.info(_("Finished step {!r}.").format('vmw-clusters'))
+ if self.stop_at_step == 'vmw-clusters':
+ raise AbortExecution('vmw-clusters')
+
+    # -------------------------------------------------------------------------
+ def exec_vmw_datastores(self):
+
+ if self.stop_at_step == 'vmw-datastores':
+ self.incr_verbosity()
+
+ nr_total = 0
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for datastores in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_datastores()
+ nr_total += len(self.vsphere[vname].datastores.keys())
+
+ if nr_total:
+ msg = ngettext("Found one datastore.", "Found {n} datastores.", nr_total)
+ LOG.debug(msg.format(n=nr_total))
+ else:
+ LOG.error(_("No VSPhere datastores found."))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-datastores'))
+ if self.stop_at_step == 'vmw-datastores':
+ raise AbortExecution('vmw-datastores')
+
+    # -------------------------------------------------------------------------
+ def exec_vmw_ds_clusters(self):
+
+ nr_total = 0
+
+ if self.stop_at_step == 'vmw-ds-clusters':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for datastore clusters in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_ds_clusters()
+ nr_total += len(self.vsphere[vname].ds_clusters.keys())
+
+ if nr_total:
+ msg = ngettext(
+ "Found one datastore cluster.",
+ "Found {n} datastore clusters.",
+ nr_total)
+ LOG.debug(msg.format(n=nr_total))
+ else:
+ LOG.warn(_("No VSPhere datastore clusters found."))
+
+ LOG.info(_("Finished step {!r}.").format('vmw-ds-clusters'))
+ if self.stop_at_step == 'vmw-ds-clusters':
+ raise AbortExecution('vmw-ds-clusters')
+
+    # -------------------------------------------------------------------------
+ def exec_vmw_networks(self):
+
+ if self.stop_at_step == 'vmw-networks':
+ self.incr_verbosity()
+
+ for vname in self.vsphere:
+ LOG.debug(_("Searching for networks in VSPhere {!r} ...").format(vname))
+ self.vsphere[vname].get_networks()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring vSphere {v!r} resources.",
+ "Found {n} errors in exploring vSphere {v!r} resources.",
+ self.eval_errors).format(n=self.eval_errors, v=vname)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('vmw-networks'))
+ if self.stop_at_step == 'vmw-networks':
+ raise AbortExecution('vmw-networks')
+
+    # -------------------------------------------------------------------------
+ def exec_vmw_templates(self):
+
+ if self.stop_at_step == 'vmw-templates':
+ self.incr_verbosity()
+
+ self.explore_vsphere_templates()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in exploring vSphere templates.",
+ "Found {n} errors in exploring vSphere templates.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('vmw-templates'))
+ if self.stop_at_step == 'vmw-templates':
+ raise AbortExecution('vmw-templates')
+
+    # -------------------------------------------------------------------------
+ def exec_validate_yaml(self):
+
+ if self.stop_at_step == 'validate-yaml':
+ self.incr_verbosity()
+
+ print()
+ LOG.info(_("Validating information from YAML file ..."))
+
+ self.validate_clusters()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating vSphere computing clusters.",
+ "Found {n} errors in validating vSphere computing clusters.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ self.get_all_vms()
+ self.validate_vms()
+
+ LOG.info(_("Finished step {!r}.").format('validate-yaml'))
+ if self.stop_at_step == 'validate-yaml':
+ raise AbortExecution('validate-yaml')
+
+    # -------------------------------------------------------------------------
+ def get_all_vms(self):
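+        """Collect the names and paths of all existing VMs and templates."""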
+
+ LOG.info(_("Got a list of all VMs and templates ..."))
+ self.all_vms = {}
+ re_vm = re.compile(r'.*')
+
+ for vs_name in self.vsphere:
+
+ if vs_name not in self.all_vms:
+ self.all_vms[vs_name] = {}
+
+ vm_list = self.vsphere[vs_name].get_vms(re_vm, name_only=True)
+ for vm_tuple in vm_list:
+ vm_name = vm_tuple[0]
+ vm_path = vm_tuple[1]
+ if vm_name in self.all_vms[vs_name]:
+ self.all_vms[vs_name][vm_name].append(vm_path)
+ else:
+ self.all_vms[vs_name][vm_name] = [vm_path]
+
+ if self.verbose > 2:
+ msg = _("All existing VMs and templates:")
+ msg += '\n' + pp(self.all_vms)
+ LOG.debug(msg)
+
+    # -------------------------------------------------------------------------
+ def exec_validate_storage(self):
+
+ if self.stop_at_step == 'validate-storage':
+ self.incr_verbosity()
+
+ self.validate_storages()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating VM storages.",
+ "Found {n} errors in validating VM storages.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-storage'))
+ if self.stop_at_step == 'validate-storage':
+ raise AbortExecution('validate-storage')
+
+    # -------------------------------------------------------------------------
+ def exec_validate_iface(self):
+
+ if self.stop_at_step == 'validate-iface':
+ self.incr_verbosity()
+
+ self.validate_interfaces()
+ if self.eval_errors:
+ msg = ngettext(
+ "Found one error in validating VM interfaces.",
+ "Found {n} errors in validating VM interfaces.",
+ self.eval_errors).format(n=self.eval_errors)
+ raise ExpectedHandlerError(msg)
+
+ LOG.info(_("Finished step {!r}.").format('validate-iface'))
+ if self.stop_at_step == 'validate-iface':
+ raise AbortExecution('validate-iface')
+
+    # -------------------------------------------------------------------------
+ def exec_vsphere_folders(self):
+
+ if self.stop_at_step == 'ensure-vmw-folders':
+ self.incr_verbosity()
+
+ self.ensure_vsphere_folders()
+
+ LOG.info(_("Finished step {!r}.").format('ensure-vmw-folders'))
+ if self.stop_at_step == 'ensure-vmw-folders':
+ raise AbortExecution('ensure-vmw-folders')
+
+    # -------------------------------------------------------------------------
+ def explore_vsphere_templates(self):
+
+ LOG.info(_("Exploring all vSphere templates ..."))
+
+ for vname in self.vsphere:
+
+ if vname not in self.vsphere_templates:
+ self.vsphere_templates[vname] = {}
+
+ self.config.vsphere[vname].used_templates = []
+
+ for vm in self.vms:
+ template_name = vm.vm_template
+ if template_name:
+ if template_name not in self.config.vsphere[vname].used_templates:
+ self.config.vsphere[vname].used_templates.append(template_name)
+ else:
+ LOG.error(_("VM {!r} has not template defined.").format(vm.name))
+ self.eval_errors += 1
+
+ msg = _("All {} VSPhere templates to explore:").format(vname)
+ msg += "\n" + pp(self.config.vsphere[vname].used_templates)
+ LOG.debug(msg)
+
+ for template_name in self.config.vsphere[vname].used_templates:
+
+ if template_name in self.vsphere_templates[vname]:
+ continue
+
+ LOG.debug(_("Searching for template {t!r} in VSPhere {v!r} ...").format(
+ t=template_name, v=vname))
+ re_vm = re.compile(r'^' + re.escape(template_name) + r'$', re.IGNORECASE)
+ vm_list = self.vsphere[vname].get_vms(re_vm, as_obj=True, stop_at_found=True)
+ if vm_list:
+ vm = vm_list[0]
+ tname = vm.name.lower()
+ if tname not in self.vsphere_templates[vname]:
+ self.vsphere_templates[vname][template_name] = vm
+ else:
+ LOG.error(_("Template {t!r} not found in VSPhere {v!r}.").format(
+ t=template_name, v=vname))
+ self.eval_errors += 1
+
+ if self.verbose > 2:
+ msg = _("All explored vSphere templates:")
+ out_dict = {}
+ for vname in self.vsphere_templates:
+ out_dict[vname] = {}
+ for tname in self.vsphere_templates[vname]:
+ out_dict[vname][tname] = self.vsphere_templates[vname][tname].as_dict()
+ msg += "\n" + pp(out_dict)
+ LOG.debug(msg)
+
+    # -------------------------------------------------------------------------
+ def validate_clusters(self):
+
+ print()
+ LOG.info(_("Validating existence of computing clusters of the VMs."))
+
+ clusters = {}
+
+ for vm in self.vms:
+
+ vname = vm.vsphere
+ if vname not in clusters:
+ clusters[vname] = {}
+
+            if vm.cluster in clusters[vname]:
+ clusters[vname][vm.cluster].append(vm.name)
+ else:
+ clusters[vname][vm.cluster] = [vm.name]
+
+ for vname in clusters.keys():
+ for cluster in clusters[vname].keys():
+
+ vms = clusters[vname][cluster]
+
+ cl = str(cluster)
+ LOG.debug(_(
+ "Checking existence of computing cluster {c!r} in VSPhere {v!r} ...").format(
+ c=cl, v=vname))
+
+ vsphere = self.vsphere[vname]
+ vmw_cluster = vsphere.get_cluster_by_name(cl)
+ if vmw_cluster:
+ if self.verbose > 1:
+ LOG.debug(_(
+ "Found computing cluster {cl!r} in VSPhere {v!r} (defined for VMs "
+ "{vms}).").format(cl=vmw_cluster.name, v=vname, vms=pp(vms)))
+ else:
+ LOG.error(_(
+ "Computing cluster {cl!r} (defined for VMs {vms}) in VSPhere {v!r} not "
+ "found.").format(cl=cl, vms=pp(vms), v=vname))
+ self.eval_errors += 1
+
+    # -------------------------------------------------------------------------
+ def validate_vms(self):
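+        """Check which of the defined VMs already exist in VMware and mark them."""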
+
+ print()
+ LOG.info(_("Validating existence of VMs in VMWare."))
+ vms2perform = []
+
+ for vm in sorted(self.vms, key=attrgetter('tf_name')):
+
+ print(" * {} ".format(vm.fqdn), end='', flush=True)
+ if self.verbose:
+ print()
+ vs_name = vm.vsphere
+ vsphere = self.vsphere[vs_name]
+
+ vm_paths = None
+ if vs_name in self.all_vms:
+ if vm.fqdn in self.all_vms[vs_name]:
+ vm_paths = self.all_vms[vs_name][vm.fqdn]
+
+ if vm_paths:
+                msg = _('[{m}] - VM already exists in VSphere {v!r}, path {p!r}.').format(
+ m=self.colored('Existing', 'YELLOW'), v=vs_name, p=pp(vm_paths))
+ print(msg, end='', flush=True)
+ if self.verbose:
+ print()
+
+ vm_info = vsphere.get_vm(vm.fqdn, vsphere_name=vs_name, as_obj=True)
+ if self.verbose > 2:
+ LOG.debug(_("VM info:") + "\n" + pp(vm_info.as_dict(bare=True)))
+ ds = vm_info.config_path_storage
+ LOG.debug(_("Datastore of VM {vm!r}: {ds!r}.").format(vm=vm.name, ds=ds))
+ vm.datastore = ds
+ vm.already_existing = True
+ self.existing_vms.append(vm_info)
+
+ else:
+
+ print('[{}] '.format(self.colored('OK', 'GREEN')), end='', flush=True)
+ vm.already_existing = False
+
+ vms2perform.append(vm)
+ print()
+
+ self.vms = vms2perform
+
+ print()
+
+ if not len(self.vms):
+ print()
+ print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
+ print(self.colored('* ' + _('CAUTION!'), ('BOLD', 'RED')), file=sys.stderr)
+ print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
+ print()
+ print(
+                self.colored(_('Did not find any VM to deploy!'), ('BOLD', 'RED')),
+ file=sys.stderr)
+ print()
+ raise ExpectedHandlerError(_("No VMs to deploy"))
+
+    # -------------------------------------------------------------------------
+ def validate_storages(self):
+
+ self._validate_ds_clusters()
+ self._validate_datastores()
+
+ if self.verbose:
+ if self.used_dc_clusters:
+ out_lines = []
+ for vs_name in self.used_dc_clusters:
+ for cluster in self.used_dc_clusters[vs_name]:
+ out_lines.append(' * VSphere {v!r}: {c}'.format(
+ v=vs_name, c=cluster))
+ out = '\n'.join(out_lines)
+ LOG.debug(_("Used datastore clusters:") + "\n" + out)
+ else:
+ LOG.debug(_("No datastore clusters are used."))
+ if self.used_datastores:
+ out_lines = []
+ for vs_name in self.used_datastores:
+ for ds in self.used_datastores[vs_name]:
+ out_lines.append(' * VSphere {v!r}: {ds}'.format(v=vs_name, ds=ds))
+ out = '\n'.join(out_lines)
+ LOG.debug(_("Used datastors:") + "\n" + out)
+ else:
+ LOG.debug(_("No datastores are used."))
+
+    # -------------------------------------------------------------------------
+ def _validate_ds_clusters(self):
+
+ LOG.info(_("Validating given datastore clusters of VMs ..."))
+
+ for vm in self.vms:
+
+ if not vm.ds_cluster:
+ continue
+
+ self._validate_dscluster_vm(vm)
+
+    # -------------------------------------------------------------------------
+ def _validate_dscluster_vm(self, vm):
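+        """Check the datastore cluster of the given VM and its available space."""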
+
+ if self.verbose > 2:
+            LOG.debug(_("Disk mappings:") + '\n' + pp(vm.disks._map))
+
+ needed_gb = 0.0
+ if not vm.already_existing:
+ for unit_number in vm.disks.keys():
+ disk = vm.disks[unit_number]
+ needed_gb += disk.size_gb
+
+ vs_name = vm.vsphere
+ vsphere = self.vsphere[vs_name]
+
+ found = False
+ for cluster_name in vsphere.ds_clusters.keys():
+ if cluster_name.lower() == vm.ds_cluster.lower():
+ if self.verbose > 2:
+ LOG.debug(_(
+ "Found datastore cluster {c!r} in VSphere {v!r} for VM {n!r}.").format(
+ n=vm.name, v=vs_name, c=vm.ds_cluster))
+ if vm.ds_cluster != cluster_name:
+ LOG.debug(_("Setting datastore cluster for VM {n!r} to {c!r} ...").format(
+ n=vm.name, c=cluster_name))
+ vm.ds_cluster = cluster_name
+ ds_cluster = vsphere.ds_clusters[cluster_name]
+ if self.verbose > 2:
+ LOG.debug(_(
+ "Free space of cluster {c!r} in VSphere {v!r} before provisioning: "
+ "{a:0.1f} GiB.").format(
+ c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
+ if ds_cluster.avail_space_gb < needed_gb:
+ LOG.error(_(
+ "Datastore cluster {d!r} in VSphere {v!r} has not sufficient space for "
+ "storage of VM {vm!r} (needed {n:0.1f} GiB, available {a:0.1f} "
+ "GiB).").format(
+ d=cluster_name, v=vs_name, vm=vm.name, n=needed_gb,
+ a=ds_cluster.avail_space_gb))
+ self.eval_errors += 1
+ else:
+ ds_cluster.calculated_usage += needed_gb
+ if self.verbose > 1:
+ LOG.debug(_(
+ "Free space in cluster {c!r} in VSphere {v!r} after provisioning: "
+ "{a:0.1f} GiB.").format(
+ c=cluster_name, v=vs_name, a=ds_cluster.avail_space_gb))
+ found = True
+ if vs_name not in self.used_dc_clusters:
+ self.used_dc_clusters[vs_name] = []
+ if cluster_name not in self.used_dc_clusters[vs_name]:
+ self.used_dc_clusters[vs_name].append(cluster_name)
+ break
+
+ if not found:
+ LOG.error(_("Datastore cluster {c!r} of VM {n!r} not found in VSphere {v!r}.").format(
+ n=vm.name, c=vm.ds_cluster, v=vs_name))
+ self.eval_errors += 1
+
+    # -------------------------------------------------------------------------
+ def _validate_datastores(self):
+
+ LOG.info(_("Validating given datastores of VMs and assign failing ..."))
+
+ for vm in self.vms:
+
+ if vm.ds_cluster:
+ if vm.datastore:
+ LOG.debug(_("Removing defined datastore {d!r} for VM {n!r} ...").format(
+ d=vm.datastore, n=vm.name))
+ vm.datastore = None
+ continue
+
+ self._validate_ds_vm(vm)
+
+    # -------------------------------------------------------------------------
+ def _validate_ds_vm(self, vm):
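+        """Validate the defined datastore of the given VM or select a suitable one."""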
+
+ needed_gb = 0.0
+ if not vm.already_existing:
+ for unit_number in vm.disks.keys():
+ disk = vm.disks[unit_number]
+ needed_gb += disk.size_gb
+
+ vs_name = vm.vsphere
+ vsphere = self.vsphere[vs_name]
+
+ vm_cluster = None
+ for cluster in vsphere.clusters:
+ if cluster.name.lower() == vm.cluster.lower():
+ vm_cluster = cluster
+ break
+ if not vm_cluster:
+ msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+ c=vm.cluster, n=vm.name)
+ raise HandlerError(msg)
+
+ if vm.datastore:
+ found = False
+ found_ds_name = None
+ for ds_name in vsphere.datastores:
+ if ds_name.lower() == vm.datastore.lower():
+ if self.verbose > 2:
+ LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
+ n=vm.name, d=vm.datastore, v=vs_name))
+ if ds_name not in vm_cluster.datastores:
+ LOG.warn(_("Datastore {d!r} not available in cluster {c!r}.").format(
+ d=ds_name, c=vm.cluster))
+ break
+ if vm.datastore != ds_name:
+ LOG.debug(_("Setting datastore for VM {n!r} to {d!r} ...").format(
+ n=vm.name, d=ds_name))
+ vm.datastore = ds_name
+ ds = vsphere.datastores[ds_name]
+ if ds.avail_space_gb < needed_gb:
+ LOG.error(_(
+ "Datastore {d!r} has not sufficient space for storage of VM "
+ "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
+ d=ds_name, v=vm.name, n=needed_gb, a=ds.avail_space_gb))
+ self.eval_errors += 1
+ else:
+ ds.calculated_usage += needed_gb
+ found = True
+ found_ds_name = ds_name
+ break
+ if not found:
+ LOG.error(_("Datastore {d!r} of VM {n!r} not found in VSPhere {v!r}.").format(
+ n=vm.name, d=vm.datastore, v=vs_name))
+ self.eval_errors += 1
+ if vs_name not in self.used_datastores:
+ self.used_datastores[vs_name] = []
+            if found_ds_name and found_ds_name not in self.used_datastores[vs_name]:
+ self.used_datastores[vs_name].append(found_ds_name)
+ return
+
+ ds_name = vsphere.datastores.find_ds(
+ needed_gb, vm.ds_type, use_ds=copy.copy(vm_cluster.datastores), no_k8s=True)
+ if ds_name:
+ LOG.debug(_("Found datastore {d!r} for VM {n!r} in VSPhere {v!r}.").format(
+ d=ds_name, n=vm.name, v=vs_name))
+ vm.datastore = ds_name
+ if vs_name not in self.used_datastores:
+ self.used_datastores[vs_name] = []
+ if ds_name not in self.used_datastores[vs_name]:
+ self.used_datastores[vs_name].append(ds_name)
+ else:
+ self.eval_errors += 1
+
+    # -------------------------------------------------------------------------
+ def validate_interfaces(self):
+
+ LOG.info(_("Validating interfaces of VMs and assign networks ..."))
+ for vm in self.vms:
+ self._validate_interfaces_vm(vm)
+
+ if self.verbose > 2:
+ LOG.debug(_("Validated FQDNs:") + "\n" + pp(self.fqdns))
+ LOG.debug(_("Validated Addresses:") + "\n" + pp(self.addresses))
+
+ if self.verbose:
+
+ lines = []
+ for vs_name in self.used_networks:
+ for nw in self.used_networks[vs_name]:
+ lines.append(' * VSphere {v!r}: {n}'.format(
+ v=vs_name, n=nw))
+ out = '\n'.join(lines)
+ LOG.debug(_("Used networks:") + "\n" + out)
+
+ lines = []
+ for pair in self.dns_mapping['forward']:
+ line = ' * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+ lines.append(line)
+ LOG.debug(_("Used forward DNS entries:") + "\n" + '\n'.join(lines))
+
+ lines = []
+ for pair in self.dns_mapping['reverse']:
+ line = ' * {a!r} => {n!r}'.format(n=pair[1], a=str(pair[0]))
+ lines.append(line)
+ LOG.debug(_("Used reverse DNS entries:") + "\n" + '\n'.join(lines))
+
+    # -------------------------------------------------------------------------
+ def _validate_interfaces_vm(self, vm):
+
+ vs_name = vm.vsphere
+ LOG.debug(_("Checking interfaces of VM {n!r} in VSPhere {v!r} ...").format(
+ n=vm.name, v=vs_name))
+
+ if not vm.interfaces:
+ LOG.error(_("No interfaces defined for VM {!r}.").format(vm.name))
+ self.eval_errors += 1
+ return
+
+ vsphere = self.vsphere[vs_name]
+
+ vm_cluster = None
+ for cluster in vsphere.clusters:
+ if cluster.name.lower() == vm.cluster.lower():
+ vm_cluster = cluster
+ break
+ if not vm_cluster:
+ msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+ c=vm.cluster, n=vm.name)
+ raise HandlerError(msg)
+
+ i = -1
+ for iface in vm.interfaces:
+ i += 1
+ self._validate_interface_of_vm(
+ vm_name=vm.name, iface=iface, vs_name=vs_name, vm_cluster=vm_cluster, i=i)
+
+    # -------------------------------------------------------------------------
+ def _validate_interface_of_vm(self, vm_name, iface, vs_name, vm_cluster, i=0):
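+        """Validate the address, FQDN and network of one interface of a VM."""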
+
+ vsphere = self.vsphere[vs_name]
+
+ if self.verbose > 1:
+ LOG.debug(_("Checking interface {i} of VM {n!r} ...").format(
+ i=i, n=vm_name))
+
+ if not iface.address:
+ LOG.error(_("Interface {i} of VM {n!r} has no defined address.").format(
+ i=i, n=vm_name))
+ self.eval_errors += 1
+ return
+
+ if not iface.fqdn:
+ LOG.error(_("Interface {i} of VM {n!r} has no defined FQDN.").format(
+ i=i, n=vm_name))
+ self.eval_errors += 1
+ return
+
+ if iface.fqdn in self.fqdns:
+ LOG.error(_(
+ "FQDN {f!r} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ f=iface.fqdn, va=self.fqdns[iface.fqdn][0], ia=self.fqdns[iface.fqdn][1],
+ ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+
+ self.fqdns[iface.fqdn] = (vm_name, i)
+
+ if iface.address_v4:
+ if iface.address_v4 in self.addresses:
+ LOG.error(_(
+ "IPv4 address {a} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ a=iface.address_v4, va=self.fqdns[iface.fqdn][0],
+ ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+ self.addresses[iface.address_v4] = (vm_name, i)
+ pair = (iface.fqdn, iface.address_v4)
+ self.dns_mapping['forward'].append(pair)
+ pair = (iface.address_v4, iface.fqdn)
+ self.dns_mapping['reverse'].append(pair)
+
+ if iface.address_v6:
+ if iface.address_v6 in self.addresses:
+ LOG.error(_(
+ "IPv6 address {a} already defined for VM {va!r}({ia}) should be set "
+ "for interface {ib} of {vb!r}.").format(
+ a=iface.address_v6, va=self.fqdns[iface.fqdn][0],
+ ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm_name))
+ self.eval_errors += 1
+ return
+ self.addresses[iface.address_v6] = (vm_name, i)
+ pair = (iface.fqdn, iface.address_v6)
+ self.dns_mapping['forward'].append(pair)
+ pair = (iface.address_v6, iface.fqdn)
+ self.dns_mapping['reverse'].append(pair)
+
+ network = iface.network
+ if network:
+ if network not in vsphere.networks:
+ LOG.error(_(
+ "Could not find network {n!r} for VM {v!r}, interface {i}.").format(
+ n=network, v=vm_name, i=i))
+ self.eval_errors += 1
+ return
+ else:
+ network = vsphere.networks.get_network_for_ip(
+ iface.address_v4, iface.address_v6)
+ if not network:
+ self.eval_errors += 1
+ return
+ iface.network = network
+ LOG.debug(_("Found network {n!r} for interface {i} of VM {v!r}.").format(
+ n=network, i=i, v=vm_name))
+
+ if network not in vm_cluster.networks:
+ LOG.error(_(
+ "Network {n!r} for interface {i} of VM {v!r} not available in "
+ "cluster {c!r}.").format(n=network, v=vm_name, i=i, c=vm_cluster.name))
+ self.eval_errors += 1
+ return
+ LOG.debug(_("Network {n!r} is available in cluster {c!r}.").format(
+ n=network, c=vm_cluster.name))
+
+ net = vsphere.networks[network]
+ if not iface.gateway:
+ LOG.debug(_("Setting gateway of interface {i} of VM {v!r} to {g}.").format(
+ i=i, v=vm_name, g=net.gateway))
+ iface.gateway = net.gateway
+
+ if net.network:
+ if net.network.version == 4:
+ if iface.netmask_v4 is None:
+ iface.netmask_v4 = net.network.prefixlen
+ else:
+ if iface.netmask_v6 is None:
+ iface.netmask_v6 = net.network.prefixlen
+
+ if vs_name not in self.used_networks:
+ self.used_networks[vs_name] = []
+ if network not in self.used_networks[vs_name]:
+ self.used_networks[vs_name].append(network)
+
+    # -------------------------------------------------------------------------
+ def ensure_vsphere_folders(self):
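+        """Ensure that all collected VM folders exist in the first configured vSphere."""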
+
+ vs_name = None
+ for vs_name in self.vsphere.keys():
+ break
+ vsphere = self.vsphere[vs_name]
+
+ print()
+ LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
+ vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+../../locale
\ No newline at end of file
--- /dev/null
+#!/bin/env python3
+# -*- coding: utf-8 -*-
+
+__version__ = '1.0.0'
+
+# vim: ts=4 et list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: The module for a VM disk destined for Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import copy
+
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
+
+from numbers import Number
+
+# Third party modules
+
+# Own modules
+from fb_tools.obj import FbBaseObject
+
+from ..config import CrTfConfiguration
+
+from ..errors import TerraformVmDefinitionError
+from ..errors import TerraformVmTooManyDisksError
+
+from ..xlate import XLATOR
+
+__version__ = '1.3.0'
+
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformDisk(FbBaseObject):
+ """A class encapsulating a disk of a VirtualMachine managed by Terraform."""
+
+ default_size = CrTfConfiguration.default_disk_size
+
+ min_size_gb = CrTfConfiguration.default_disk_min_size
+ max_size_gb = CrTfConfiguration.default_disk_max_size
+
+ disks_per_scsi_ctrlr = 15
+ max_scsi_ctrlrs = 4
+ max_scsi_disks = disks_per_scsi_ctrlr * max_scsi_ctrlrs
+
+ msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, name=None, root_disk=False, unit_number=0, size_gb=None,
+ version=__version__, initialized=False, *args, **kwargs):
+
+ self._name = 'disk'
+ self._root_disk = bool(root_disk)
+ self._unit_number = 0
+ self._size_gb = self.default_size
+
+ super(TerraformDisk, self).__init__(
+ version=version,
+ initialized=False,
+ *args, **kwargs,
+ )
+
+ if name:
+ self.name = name
+
+ self._set_unit_number(unit_number)
+ if size_gb is not None:
+ self.size_gb = size_gb
+
+ self.initialized = initialized
+
+ # -----------------------------------------------------------
+ @property
+ def name(self):
+ """The name of the disk."""
+ return self._name
+
+ @name.setter
+ def name(self, value):
+ if value is None:
+ msg = _("The name of a disk don't may be None.")
+ raise TerraformVmDefinitionError(msg)
+ v = str(value).strip()
+ if v == '':
+ msg = _("The name of a disk don't may be empty.")
+ raise TerraformVmDefinitionError(msg)
+ self._name = v
+
+ # -----------------------------------------------------------
+ @property
+ def root_disk(self):
+ """A flag indicating, that this is the root disk of a VM."""
+ return self._root_disk
+
+ @root_disk.setter
+ def root_disk(self, value):
+ self._root_disk = bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def unit_number(self):
+ """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
+ return self._unit_number
+
+ # -----------------------------------------------------------
+ @property
+ def size_gb(self):
+ """Size of the disk in GiB."""
+ return self._size_gb
+
+ @size_gb.setter
+ def size_gb(self, value):
+ val = float(value)
+ msg = _("Invalid disk size {n} - size must be {min} <= SIZE <= {max}.").format(
+ n=val, min=self.min_size_gb, max=self.max_size_gb)
+ if val < self.min_size_gb or val > self.max_size_gb:
+ raise ValueError(msg)
+ self._size_gb = val
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(TerraformDisk, self).as_dict(short=short)
+ res['name'] = self.name
+ res['default_size'] = self.default_size
+ res['max_size_gb'] = self.max_size_gb
+ res['min_size_gb'] = self.min_size_gb
+ res['root_disk'] = self.root_disk
+ res['size_gb'] = self.size_gb
+ res['unit_number'] = self.unit_number
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def __repr__(self):
+ """Typecast into a string for reproduction."""
+ out = '<%s(' % (self.__class__.__name__)
+
+ fields = []
+ fields.append('name={!r}'.format(self.name))
+ fields.append('root_disk={!r}'.format(self.root_disk))
+ fields.append('unit_number={!r}'.format(self.unit_number))
+ fields.append('size_gb={!r}'.format(self.size_gb))
+ fields.append('appname={!r}'.format(self.appname))
+ fields.append('verbose={!r}'.format(self.verbose))
+ fields.append('base_dir={!r}'.format(self.base_dir))
+ fields.append('initialized={!r}'.format(self.initialized))
+
+ out += ', '.join(fields) + ')>'
+ return out
+
+ # -------------------------------------------------------------------------
+ def _set_unit_number(self, value):
+ val = int(value)
+ if self.root_disk:
+ self._unit_number = 0
+ if val != 0:
+ msg = _("A root disk must have always the unit number 0 (given {!r}).").format(
+ value)
+ raise ValueError(msg)
+ return
+ msg = _("Invalid unit number {n} - number must be {min} <= NUMBER <= {max}.").format(
+ n=val, min=1, max=64)
+ if val < 1 or val > 64:
+ raise ValueError(msg)
+
+ self._unit_number = val
+
+ # -------------------------------------------------------------------------
+ def __copy__(self):
+
+ if self.verbose > 3:
+ LOG.debug(_("Copying Terraform disk object with unit ID {}.").format(self.unit_number))
+
+ disk = self.__class__(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir, name=self.name,
+ initialized=self.initialized, root_disk=self.root_disk, unit_number=self.unit_number,
+ size_gb=self.size_gb)
+
+ return disk
+
+ # -------------------------------------------------------------------------
+ def __eq__(self, other):
+
+ if not isinstance(other, TerraformDisk):
+ raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDisk'))
+
+ if self.name != other.name:
+ return False
+ if self.unit_number != other.unit_number:
+ return False
+ if self.root_disk != other.root_disk:
+ return False
+ if self.size_gb != other.size_gb:
+ return False
+
+ return True
+
+
+# =============================================================================
+class TerraformDiskDict(MutableMapping, FbBaseObject):
+ """
+ A dictionary containing TerraformDisk objects.
+    It works like a dict, e.g.:
+    * disks = TerraformDiskDict(TerraformDisk(name='disk0', unit_number=0, root_disk=True, size_gb=48, ...))
+ * disks[0] returns the first TerraformDisk object in the list of sorted disk names
+ * disks['disk0'] returns the TerraformDisk object with the name 'disk0'.
+ """
+
+ msg_invalid_disk_type = _("Invalid disk type {{!r}} to set, only {} allowed.").format(
+ 'TerraformDisk')
+ msg_key_not_name = _("The key {k!r} must be equal to the name of the disk {n!r}.")
+ msg_none_type_error = _("None type as key is not allowed.")
+ msg_empty_key_error = _("Empty key {!r} is not allowed.")
+ msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
+ *disks):
+
+ self._map = dict()
+
+ super(TerraformDiskDict, self).__init__(
+ appname=appname, verbose=verbose, base_dir=base_dir, initialized=False)
+
+ for disk in disks:
+ self.append(disk)
+
+ if initialized:
+ self.initialized = True
+
+ # -------------------------------------------------------------------------
+ def _set_item(self, key, disk):
+
+ if not isinstance(disk, TerraformDisk):
+ raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
+
+ if disk.name != key:
+            msg = self.msg_key_not_name.format(k=key, n=disk.name)
+            raise KeyError(msg)
+
+ self._map[key] = disk
+
+ # -------------------------------------------------------------------------
+ def append(self, disk):
+
+ if not isinstance(disk, TerraformDisk):
+ raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
+
+ self._set_item(disk.name, disk)
+
+ # -------------------------------------------------------------------------
+ def _get_item(self, key):
+
+ if key is None:
+ raise TypeError(self.msg_none_type_error)
+
+ if isinstance(key, Number):
+ num = int(key)
+ keys = self.keys()
+ name = keys[num]
+ return self._map[name]
+
+ return self._map[key]
+
+ # -------------------------------------------------------------------------
+ def get(self, key):
+ return self._get_item(key)
+
+ # -------------------------------------------------------------------------
+ def _del_item(self, key, strict=True):
+
+ if key is None:
+ raise TypeError(self.msg_none_type_error)
+
+ name = str(key)
+ if isinstance(key, Number):
+ num = int(key)
+ keys = self.keys()
+ name = keys[num]
+
+ if not strict and name not in self._map:
+ return
+
+ del self._map[name]
+
+ # -------------------------------------------------------------------------
+ # The next five methods are requirements of the ABC.
+ def __setitem__(self, key, value):
+ self._set_item(key, value)
+
+ # -------------------------------------------------------------------------
+ def __getitem__(self, key):
+ return self._get_item(key)
+
+ # -------------------------------------------------------------------------
+ def __delitem__(self, key):
+ self._del_item(key)
+
+ # -------------------------------------------------------------------------
+ def __iter__(self):
+
+ for name in self.keys():
+ yield name
+
+ # -------------------------------------------------------------------------
+ def __len__(self):
+ return len(self._map)
+
+ # -------------------------------------------------------------------------
+ # The next methods aren't required, but nice for different purposes:
+ def __str__(self):
+ """returns simple dict representation of the mapping"""
+ return str(self._map)
+
+ # -------------------------------------------------------------------------
+ def __repr__(self):
+ '''echoes class, id, & reproducible representation in the REPL'''
+ return '{}, {}({})'.format(
+ super(TerraformDiskDict, self).__repr__(),
+ self.__class__.__name__,
+ self._map)
+
+ # -------------------------------------------------------------------------
+ def __contains__(self, key):
+
+ if key is None:
+ raise TypeError(self.msg_none_type_error)
+
+ return key in self._map
+
+ # -------------------------------------------------------------------------
+ def keys(self):
+
+ return sorted(self._map.keys(), key=str.lower)
+
+ # -------------------------------------------------------------------------
+ def items(self):
+
+ item_list = []
+
+ for name in self.keys():
+ item_list.append((name, self._map[name]))
+
+ return item_list
+
+ # -------------------------------------------------------------------------
+ def values(self):
+
+ value_list = []
+ for name in self.keys():
+ value_list.append(self._map[name])
+ return value_list
+
+ # -------------------------------------------------------------------------
+ def __eq__(self, other):
+
+ if not isinstance(other, TerraformDiskDict):
+ raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
+
+ return self._map == other._map
+
+ # -------------------------------------------------------------------------
+ def __ne__(self, other):
+
+ if not isinstance(other, TerraformDiskDict):
+ raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
+
+ return self._map != other._map
+
+ # -------------------------------------------------------------------------
+ def pop(self, key, *args):
+
+ if key is None:
+ raise TypeError(self.msg_none_type_error)
+
+ return self._map.pop(key, *args)
+
+ # -------------------------------------------------------------------------
+ def popitem(self):
+
+ if not len(self._map):
+ return None
+
+ name = self.keys()[0]
+ disk = self._map[name]
+ del self._map[name]
+ return (name, disk)
+
+ # -------------------------------------------------------------------------
+ def clear(self):
+ self._map = dict()
+
+ # -------------------------------------------------------------------------
+ def setdefault(self, key, default):
+
+ if key is None:
+ raise TypeError(self.msg_none_type_error)
+
+ if not isinstance(default, TerraformDisk):
+ raise TypeError(self.msg_invalid_disk_type.format(default.__class__.__name__))
+
+ if key in self._map:
+ return self._map[key]
+
+ self._set_item(key, default)
+ return default
+
+ # -------------------------------------------------------------------------
+ def update(self, other):
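+ """Update the mapping from another TerraformDiskDict, a dict or an iterable
+ of (key, disk) pairs."""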
+
+ if isinstance(other, TerraformDiskDict) or isinstance(other, dict):
+ for name in other.keys():
+ self._set_item(name, other[name])
+ return
+
+ for tokens in other:
+ key = tokens[0]
+ value = tokens[1]
+ self._set_item(key, value)
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
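+ """Transform the elements of the object into a dict."""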
+
+ res = super(TerraformDiskDict, self).as_dict(short=short)
+ res['map'] = {}
+
+ for name in self._map:
+ res['map'][name] = self._map[name].as_dict(short)
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def as_list(self, short=True):
+
+ res = []
+ for name in self.keys():
+ res.append(self._map[name].as_dict(short))
+ return res
+
+ # -------------------------------------------------------------------------
+ def __copy__(self):
+
+ if self.verbose > 2:
+ LOG.debug(_("Copying Terraform disk dictionary ..."))
+
+ new = self.__class__(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ initialized=False)
+
+ for name in self._map:
+ new.append(copy.copy(self._map[name]))
+
+ if self.initialized:
+ new.initialized = True
+
+ return new
+
+ # -------------------------------------------------------------------------
+ def get_ctrlr_count(self):
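+ """Return the number of SCSI controllers to use - one per disk, at least one,
+ capped at TerraformDisk.max_scsi_ctrlrs."""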
+
+ if len(self) <= 1:
+ return 1
+ if len(self) >= TerraformDisk.max_scsi_ctrlrs:
+ return TerraformDisk.max_scsi_ctrlrs
+ return len(self)
+
+
+# =============================================================================
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: The module for a VM network interface intended for Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import re
+import ipaddress
+
+try:
+ from collections.abc import Mapping
+except ImportError:
+ from collections import Mapping
+
+# Third party modules
+
+# Own modules
+from fb_tools.common import pp, to_bool, RE_FQDN
+
+from fb_tools.obj import FbBaseObject
+
+from ..errors import TerraformVmDefinitionError
+
+from ..xlate import XLATOR
+
+__version__ = '1.0.1'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformInterface(FbBaseObject):
+ """A class encapsulating a network interface of a VirtualMachine managed by Terraform."""
+
+ re_address = re.compile(r'^\s*address\s*$', re.IGNORECASE)
+ re_address_v4 = re.compile(r'^\s*address[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
+ re_address_v6 = re.compile(r'^\s*address[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
+ re_fqdn = re.compile(r'^\s*fqdn\s*$', re.IGNORECASE)
+ re_gateway = re.compile(r'^\s*gateway\s*$', re.IGNORECASE)
+ re_gateway_v4 = re.compile(r'^\s*gateway[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
+ re_gateway_v6 = re.compile(r'^\s*gateway[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
+ re_v4_before_v6 = re.compile(
+ r'^\s*(?:ip)?v4[_-](?:before|primary[_-]to)[_-](?:ip)?v6\s*$', re.IGNORECASE)
+ re_network = re.compile(r'^\s*network\s*$', re.IGNORECASE)
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
+ address_v4=None, address_v6=None, fqdn=None, network=None, ipv4_primary=True,
+ gateway_v4=None, gateway_v6=None, netmask_v4=None, netmask_v6=None):
+
+ self._address_v4 = None
+ self._netmask_v4 = None
+ self._address_v6 = None
+ self._netmask_v6 = None
+ self._fqdn = None
+ self._network = None
+ self._gateway_v4 = None
+ self._gateway_v6 = None
+ self._ipv4_primary = bool(ipv4_primary)
+
+ super(TerraformInterface, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ initialized=False,
+ )
+
+ if address_v4 is not None:
+ self.address_v4 = address_v4
+ if address_v6 is not None:
+ self.address_v6 = address_v6
+ if fqdn is not None:
+ self.fqdn = fqdn
+ if network is not None:
+ self.network = network
+ if gateway_v4 is not None:
+ self.gateway_v4 = gateway_v4
+ if gateway_v6 is not None:
+ self.gateway_v6 = gateway_v6
+ if netmask_v4 is not None:
+ self.netmask_v4 = netmask_v4
+ if netmask_v6 is not None:
+ self.netmask_v6 = netmask_v6
+
+ self.initialized = initialized
+
+ # -----------------------------------------------------------
+ @property
+ def ipv4_primary(self):
+ """Is the IPv6 address prior to the IPv6 address, if both are existing?"""
+ return self._ipv4_primary
+
+ @ipv4_primary.setter
+ def ipv4_primary(self, value):
+ self._ipv4_primary = bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def address_v4(self):
+ """The IPv4 address of the interface."""
+ return self._address_v4
+
+ @address_v4.setter
+ def address_v4(self, value):
+ if value is None:
+ self._address_v4 = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._address_v4 = None
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version != 4:
+ msg = _("IP address {!r} is not an IPv4 address.").format(addr)
+ raise ValueError(msg)
+
+ self._address_v4 = addr
+
+ # -----------------------------------------------------------
+ @property
+ def address_v6(self):
+ """The IPv6 address of the interface."""
+ return self._address_v6
+
+ @address_v6.setter
+ def address_v6(self, value):
+ if value is None:
+ self._address_v6 = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._address_v6 = None
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version != 6:
+ msg = _("IP address {!r} is not an IPv6 address.").format(addr)
+ raise ValueError(msg)
+
+ self._address_v6 = addr
+
+ # -----------------------------------------------------------
+ @property
+ def address(self):
+ """The IPv4 or IPv6 address of the interface."""
+ if self.address_v4 and self.address_v6:
+ if self.ipv4_primary:
+ return self.address_v4
+ else:
+ return self.address_v6
+ if self.address_v4:
+ return self.address_v4
+ if self.address_v6:
+ return self.address_v6
+ return None
+
+ @address.setter
+ def address(self, value):
+ if value is None:
+ return
+ val = str(value).strip()
+ if val == '':
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version == 6:
+ self._address_v6 = addr
+ else:
+ self._address_v4 = addr
+
+ # -----------------------------------------------------------
+ @property
+ def fqdn(self):
+ """The FQDN of the interface address to define."""
+ return self._fqdn
+
+ @fqdn.setter
+ def fqdn(self, value):
+ if value is None:
+ self._fqdn = None
+ return
+
+ val = str(value).strip().lower()
+ if val == '':
+ self._fqdn = None
+ return
+
+ if not RE_FQDN.search(val):
+ msg = _("The hostname {!r} is no a valid FQDN.").format(value)
+ raise ValueError(msg)
+ self._fqdn = val
+
+ # -----------------------------------------------------------
+ @property
+ def network(self):
+ """The name of the VSphere network of the interface."""
+ return self._network
+
+ @network.setter
+ def network(self, value):
+ if value is None:
+ self._network = None
+ return
+
+ val = str(value).strip()
+ if val == '':
+ self._network = None
+ return
+
+ self._network = val
+
+ # -----------------------------------------------------------
+ @property
+ def gateway_v4(self):
+ """The IPv4 gateway of the interface."""
+ return self._gateway_v4
+
+ @gateway_v4.setter
+ def gateway_v4(self, value):
+ if value is None:
+ self._gateway_v4 = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._gateway_v4 = None
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version != 4:
+ msg = _("IP gateway {!r} is not an IPv4 address.").format(addr)
+ raise ValueError(msg)
+
+ self._gateway_v4 = addr
+
+ # -----------------------------------------------------------
+ @property
+ def gateway_v6(self):
+ """The IPv6 gateway of the interface."""
+ return self._gateway_v6
+
+ @gateway_v6.setter
+ def gateway_v6(self, value):
+ if value is None:
+ self._gateway_v6 = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._gateway_v6 = None
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version != 6:
+ msg = _("IP gateway {!r} is not an IPv6 address.").format(addr)
+ raise ValueError(msg)
+
+ self._gateway_v6 = addr
+
+ # -----------------------------------------------------------
+ @property
+ def netmask_v4(self):
+ """The IPv4 netmask of the interface."""
+ return self._netmask_v4
+
+ @netmask_v4.setter
+ def netmask_v4(self, value):
+ if value is None:
+ self._netmask_v4 = None
+ return
+ val = int(value)
+ if val < 0 or val > 32:
+ msg = _("Invalid IPv4 netmask {!r}").format(value)
+ raise ValueError(msg)
+
+ self._netmask_v4 = val
+
+ # -----------------------------------------------------------
+ @property
+ def netmask_v6(self):
+ """The IPv6 netmask of the interface."""
+ return self._netmask_v6
+
+ @netmask_v6.setter
+ def netmask_v6(self, value):
+ if value is None:
+ self._netmask_v6 = None
+ return
+ val = int(value)
+ if val < 0 or val > 128:
+ msg = _("Invalid IPv6 netmask {!r}").format(value)
+ raise ValueError(msg)
+
+ self._netmask_v6 = val
+
+ # -----------------------------------------------------------
+ @property
+ def gateway(self):
+ """The IPv4 or IPv6 gateway of the interface."""
+ if self.gateway_v4 and self.gateway_v6:
+ if self.ipv4_primary:
+ return self.gateway_v4
+ else:
+ return self.gateway_v6
+ if self.gateway_v4:
+ return self.gateway_v4
+ if self.gateway_v6:
+ return self.gateway_v6
+ return None
+
+ @gateway.setter
+ def gateway(self, value):
+ if value is None:
+ return
+ val = str(value).strip()
+ if val == '':
+ return
+
+ addr = ipaddress.ip_address(val)
+ if addr.version == 6:
+ self._gateway_v6 = addr
+ else:
+ self._gateway_v4 = addr
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(TerraformInterface, self).as_dict(short=short)
+ res['address'] = self.address
+ res['address_v4'] = self.address_v4
+ res['address_v6'] = self.address_v6
+ res['fqdn'] = self.fqdn
+ res['gateway'] = self.gateway
+ res['gateway_v4'] = self.gateway_v4
+ res['gateway_v6'] = self.gateway_v6
+ res['ipv4_primary'] = self.ipv4_primary
+ res['netmask_v4'] = self.netmask_v4
+ res['netmask_v6'] = self.netmask_v6
+ res['network'] = self.network
+
+ return res
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def from_def(cls, if_def, appname=None, verbose=0, base_dir=None):
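+ """Create a TerraformInterface from a definition mapping (e.g. read from YAML).
+
+ The keys are matched case-insensitively against the class regexes above, for
+ example something like {'address': '192.0.2.10', 'gateway': '192.0.2.1',
+ 'network': 'DMZ-Net', 'fqdn': 'www.example.com'} (illustrative values only).
+ """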
+
+ if verbose > 2:
+ LOG.debug(
+ _("Trying to instantiate terraform interface from data:") + "\n" + pp(if_def))
+
+ if not isinstance(if_def, Mapping):
+ msg = _("Interface definition is not a dictionary:") + "\n" + pp(if_def)
+ raise TerraformVmDefinitionError(msg)
+
+ interface = cls(appname=appname, verbose=verbose, base_dir=base_dir)
+ interface.initialized = False
+
+ for key in sorted(if_def.keys(), key=str.lower):
+
+ val = if_def[key]
+
+ if verbose > 3:
+ LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=val))
+
+ if cls.re_address.search(key) and val:
+ interface.address = val
+ continue
+ if cls.re_address_v4.search(key):
+ interface.address_v4 = val
+ continue
+ if cls.re_address_v6.search(key):
+ interface.address_v6 = val
+ continue
+ if cls.re_v4_before_v6.search(key):
+ interface.ipv4_primary = to_bool(val)
+ continue
+ if cls.re_fqdn.search(key):
+ interface.fqdn = val
+ continue
+ if cls.re_network.search(key):
+ interface.network = val
+ continue
+ if cls.re_gateway.search(key) and val:
+ interface.gateway = val
+ continue
+ if cls.re_gateway_v4.search(key):
+ interface.gateway_v4 = val
+ continue
+ if cls.re_gateway_v6.search(key):
+ interface.gateway_v6 = val
+ continue
+
+ interface.initialized = False
+ return interface
+
+ # -------------------------------------------------------------------------
+ def __copy__(self):
+
+ if self.verbose > 2:
+ LOG.debug(_("Copying Terraform interface object with address {}.").format(
+ self.address))
+
+ new = self.__class__(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ initialized=self.initialized, address_v4=self.address_v4, address_v6=self.address_v6,
+ ipv4_primary=self.ipv4_primary, fqdn=self.fqdn, network=self.network,
+ gateway_v4=self.gateway_v4, gateway_v6=self.gateway_v6,
+ netmask_v4=self.netmask_v4, netmask_v6=self.netmask_v6,
+ )
+
+ return new
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: The module for a virtual machine intended for Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import re
+import copy
+import ipaddress
+
+try:
+ from collections.abc import Iterable, Mapping
+except ImportError:
+ from collections import Iterable, Mapping
+
+# Third party modules
+
+# Own modules
+from fb_tools.common import pp, to_bool, RE_FQDN, RE_TF_NAME
+from fb_tools.common import human2mbytes, is_sequence
+
+from fb_tools.handling_obj import HandlingObject
+
+from ..errors import TerraformVmDefinitionError
+from ..errors import TerraformVmTooManyDisksError
+
+from ..config import CrTfConfiguration
+
+from ..xlate import XLATOR
+
+from .disk import TerraformDisk, TerraformDiskDict
+
+from .interface import TerraformInterface
+
+__version__ = '1.7.1'
+
+LOG = logging.getLogger(__name__)
+
+PUPPET_TIERS = (
+ 'production',
+ 'live',
+ 'test',
+ 'stage',
+ 'development',
+)
+
+PUPPET_ENVIRONMENTS = (
+ 'production',
+ 'test',
+ 'development',
+)
+
+DS_TYPES = (
+ 'ssd',
+ 'sas',
+ 'sata',
+)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformVm(HandlingObject):
+ """A class encapsulating a VirtualMachine managed by Terraform."""
+
+ default_vsphere = 'live'
+
+ default_boot_delay = 5
+ default_customer = 'Pixelpark'
+ default_ds_type = 'sata'
+ default_folder = 'pixelpark'
+ default_memory = 1024
+ default_nameservers = (
+ ipaddress.ip_address('93.188.109.13'),
+ ipaddress.ip_address('217.66.52.10'),
+ ipaddress.ip_address('212.91.225.75')
+ )
+ default_searchdomains = ('pixelpark.net', 'pixelpark.com')
+ default_dns_options = 'timeout:1 attempts:2'
+ default_num_cpus = 1
+ default_puppet_contact = '8x5@pixelpark.com'
+ default_puppet_customer = 'pixelpark'
+ default_puppet_env = 'development'
+ default_puppet_tier = 'development'
+ default_puppet_role = 'default'
+ default_purpose = "Customer project"
+ default_rootdisk_size = 20.0
+
+ valid_puppet_environments = []
+ for env in PUPPET_ENVIRONMENTS:
+ valid_puppet_environments.append(env)
+
+ max_num_cpus = 64
+ memory_chunk = 256
+ max_memory = 512 * 1024
+ max_boot_delay = 30
+ min_rootdisk_size = CrTfConfiguration.default_root_min_size
+ max_rootdisk_size = CrTfConfiguration.default_root_max_size
+
+ max_nameservers = 4
+ max_searchdomains = 5
+
+ re_key_fqdn = re.compile(r'^\s*(?:fqdn|name)\s*$', re.IGNORECASE)
+ re_key_vm_folder = re.compile(r'^\s*(?:vm[_-]?)folder\s*$', re.IGNORECASE)
+ re_key_boot_delay = re.compile(r'^\s*boot[_-]?delay\s*$', re.IGNORECASE)
+ re_key_ds_cluster = re.compile(r'^\s*(?:datastore|ds)[_-]?cluster\s*$', re.IGNORECASE)
+ re_key_ds_type = re.compile(r'^\s*(?:datastore|ds)[_-]?type\s*$', re.IGNORECASE)
+ re_key_puppet_contact = re.compile(r'^\s*puppet[_-]?contact\s*$', re.IGNORECASE)
+ re_key_puppet_customer = re.compile(r'^\s*(?:puppet|hiera)[_-]?customer\s*$', re.IGNORECASE)
+ re_key_puppet_project = re.compile(r'^\s*(?:puppet|hiera)[_-]?project\s*$', re.IGNORECASE)
+ re_key_puppet_tier = re.compile(r'^\s*puppet[_-]?tier\s*$', re.IGNORECASE)
+ re_key_puppet_env = re.compile(r'^\s*puppet[_-]?env(?:ironment)?\s*$', re.IGNORECASE)
+ re_key_puppet_role = re.compile(r'^\s*puppet[_-]?role\s*$', re.IGNORECASE)
+ re_key_puppet_initial_install = re.compile(
+ r'^\s*puppet[_-]?initial[_-]?install\s*$', re.IGNORECASE)
+ re_key_env = re.compile(r'^\s*env(?:ironment)?\s*$', re.IGNORECASE)
+ re_key_initial_install = re.compile(r'^\s*initial[_-]?install\s*$', re.IGNORECASE)
+ re_key_ns = re.compile(r'^\s*nameservers?\s*$', re.IGNORECASE)
+ re_key_searchdomain = re.compile(r'^\s*search[_-]*domains?\s*$', re.IGNORECASE)
+ re_key_dnsoptions = re.compile(r'^\s*(dns|resolv)[_-]*options?\s*$', re.IGNORECASE)
+ re_key_root_disk = re.compile(r'^\s*root[_-]?disk\s*$', re.IGNORECASE)
+ re_key_root_disk_size = re.compile(r'^\s*root[_-]?disk[_-]?size\s*$', re.IGNORECASE)
+ re_key_data_disk = re.compile(r'^\s*data[_-]?disk\s*$', re.IGNORECASE)
+ re_key_data_disks = re.compile(r'^\s*data[_-]?disks\s*$', re.IGNORECASE)
+ re_key_interface = re.compile(r'^\s*interfaces?\s*$', re.IGNORECASE)
+ re_key_has_backup = re.compile(r'^\s*has[_-]?backup\s*$', re.IGNORECASE)
+ re_key_has_puppet = re.compile(r'^\s*has[_-]?puppet\s*$', re.IGNORECASE)
+ re_key_is_rhel = re.compile(r'^\s*is[_-]?rhel\s*$', re.IGNORECASE)
+ re_memory_value = re.compile(r'^\s*(\d+(?:\.\d*)?)\s*(?:(\D+)\s*)?$')
+ re_rhel_template = re.compile(r'(?:rhel|red[\s_-]*hat[\s_-]*enterprise)', re.IGNORECASE)
+
+ re_invalid_chars = re.compile(r'[^a-z0-9@\._-]', re.IGNORECASE)
+ re_invalid_chars_role = re.compile(r'[^a-z0-9:@\._-]', re.IGNORECASE)
+
+ re_disk_size = re.compile(r'^\s*size\s*$', re.IGNORECASE)
+ re_disk_mountpoint = re.compile(r'^\s*mount[_-]?point\s*$', re.IGNORECASE)
+ re_disk_vgname = re.compile(r'^\s*vg[_-]?name\s*$', re.IGNORECASE)
+ re_disk_lvname = re.compile(r'^\s*lv[_-]?name\s*$', re.IGNORECASE)
+ re_disk_fstype = re.compile(r'^\s*fs[_-]?type\s*$', re.IGNORECASE)
+
+ re_fqdn_dot_at_end = re.compile(r'[^\.]\.$')
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None,
+ simulate=False, force=None, terminal_has_colors=False, initialized=False,
+ is_template=True, name=None, fqdn=None, folder=None, num_cpus=None, memory=None,
+ cluster=None, boot_delay=None, ds_cluster=None, datastore=None, ds_type=None,
+ customer=None, rootdisk_size=None, purpose=None, puppet_contact=None, puppet_role=None,
+ puppet_customer=None, puppet_project=None, puppet_tier=None, puppet_env=None,
+ puppet_initial_install=True, vm_template=None, nameservers=None, searchdomains=None,
+ dns_options=None, has_backup=True, has_puppet=True, already_existing=None,
+ vsphere=None, is_rhel=None):
+
+ self._vsphere = self.default_vsphere
+ self._is_template = bool(is_template)
+ self._name = None
+ self._fqdn = None
+ self._cluster = None
+ self._folder = self.default_folder
+ self._num_cpus = self.default_num_cpus
+ self._memory = self.default_memory
+ self._boot_delay = self.default_boot_delay
+ self._ds_cluster = None
+ self._datastore = None
+ self._ds_type = self.default_ds_type
+ self._customer = self.default_customer
+ self._rootdisk_size = self.default_rootdisk_size
+ self._purpose = self.default_purpose
+ self._puppet_contact = self.default_puppet_contact
+ self._puppet_customer = self.default_puppet_customer
+ self._puppet_project = None
+ self._puppet_tier = self.default_puppet_tier
+ self._puppet_env = None
+ self._puppet_role = self.default_puppet_role
+ self._puppet_initial_install = bool(puppet_initial_install)
+ self._vm_template = None
+ self._has_backup = bool(has_backup)
+ self._has_puppet = bool(has_puppet)
+ self._already_existing = False
+ self._is_rhel = None
+
+ self.disks = None
+ self.interfaces = []
+
+ self.nameservers = copy.copy(self.default_nameservers)
+ self.searchdomains = copy.copy(self.default_searchdomains)
+ self.dns_options = copy.copy(self.default_dns_options)
+
+ super(TerraformVm, self).__init__(
+ appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+ simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
+ initialized=False,
+ )
+
+ self._post_init(
+ name=name, fqdn=fqdn, num_cpus=num_cpus, memory=memory, folder=folder,
+ boot_delay=boot_delay, vm_template=vm_template, puppet_contact=puppet_contact,
+ puppet_customer=puppet_customer, puppet_tier=puppet_tier, puppet_env=puppet_env,
+ puppet_initial_install=puppet_initial_install, is_rhel=is_rhel,
+ cluster=cluster, rootdisk_size=rootdisk_size, nameservers=nameservers,
+ searchdomains=searchdomains, dns_options=dns_options, purpose=purpose,
+ customer=customer, ds_cluster=ds_cluster, datastore=datastore, ds_type=ds_type,
+ already_existing=already_existing, initialized=initialized, puppet_role=puppet_role,
+ puppet_project=puppet_project, vsphere=vsphere)
+
+ # -------------------------------------------------------------------------
+ def _post_init(
+ self, name=None, fqdn=None, nameservers=None, searchdomains=None,
+ initialized=False, **kwargs):
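+ """Apply the remaining constructor arguments after the base class init and
+ check the consistency of template name and FQDN."""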
+
+ self.disks = TerraformDiskDict(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
+
+ if name and str(name).strip():
+ self._name = str(name).strip()
+
+ if not self.is_template and fqdn is not None:
+ self.fqdn = fqdn
+
+ for (key, val) in kwargs.items():
+ if val is None:
+ continue
+ if hasattr(self, str(key)):
+ setattr(self, str(key), val)
+
+ if nameservers is not None:
+ self.nameservers = self._get_ns_list(nameservers)
+ if searchdomains is not None:
+ self.searchdomains = self._get_searchdomain_list(searchdomains)
+
+ if self.is_template:
+ if self.fqdn:
+ msg = _("A VM template definition may not have a FQDN (found: {!r}).").format(
+ self.fqdn)
+ raise TerraformVmDefinitionError(msg)
+ if not self.name:
+ msg = _("A VM template definition must have a name.")
+ raise TerraformVmDefinitionError(msg)
+ else:
+ if not self.fqdn:
+ msg = _("A VM definition (no template) must have a FQDN.")
+ raise TerraformVmDefinitionError(msg)
+
+ self.apply_root_disk()
+
+ self.initialized = initialized
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def from_def(
+ cls, vm_def, name=None, is_template=False, template_vm=None, appname=None,
+ verbose=0, base_dir=None, simulate=False, force=False,
+ terminal_has_colors=False, initialized=False):
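+ """Create a TerraformVm from a definition mapping, either as a copy of the
+ given template VM or as a new object, and apply all definition keys to it."""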
+
+ if verbose > 2:
+ LOG.debug(_("Trying to instantiate VM from data:") + "\n" + pp(vm_def))
+
+ if not isinstance(vm_def, Mapping):
+ msg = _("VM definition is not a dictionary:") + "\n" + pp(vm_def)
+ raise TerraformVmDefinitionError(msg)
+
+ if template_vm:
+ if not isinstance(template_vm, TerraformVm):
+ msg = _("Given parameter {!r} is not a TerraformVm object.").format(template_vm)
+ raise TypeError(msg)
+ vm = copy.copy(template_vm)
+ vm.appname = appname
+ vm.verbose = verbose
+ vm.base_dir = base_dir
+ vm.simulate = simulate
+ vm.force = force
+ vm.terminal_has_colors = terminal_has_colors
+ else:
+ vm = cls(
+ appname=appname, verbose=verbose, base_dir=base_dir, simulate=simulate,
+ force=force, is_template=is_template, name=name,
+ terminal_has_colors=terminal_has_colors)
+ vm.initialized = False
+
+ vm.is_template = is_template
+ vm.name = name
+
+ for (key, value) in vm_def.items():
+ cls._apply_vmdef2vm(
+ vm, key, value, verbose=verbose, appname=appname, base_dir=base_dir)
+
+ vm.apply_root_disk()
+ if vm.interfaces and vm.fqdn and not vm.interfaces[0].fqdn:
+ vm.interfaces[0].fqdn = vm.fqdn
+
+ if not vm.is_template:
+ if vm.is_rhel is None:
+ vm.is_rhel = cls.guess_rhel(vm)
+
+ vm.initialized = True
+ return vm
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _apply_vmdef2vm(cls, vm, key, value, verbose=0, appname=None, base_dir=None):
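+ """Apply a single key/value pair of the VM definition to the given VM."""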
+
+ if verbose > 3:
+ LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=value))
+
+ if cls._apply_general_vmdef2vm(vm, key, value, verbose):
+ return
+
+ if key.lower() == 'customer' and value.strip():
+ vm.customer = value.strip()
+ return
+
+ if key.lower() == 'purpose' and value:
+ vm.purpose = value.strip()
+ return
+
+ if key.lower() == 'template' and value:
+ vm.vm_template = value
+ return
+
+ if cls.re_key_has_backup.search(key):
+ vm.has_backup = to_bool(value)
+ return
+
+ if cls._apply_puppet_vmdef2vm(vm, key, value, verbose):
+ return
+
+ if cls._apply_disk_vmdef2vm(vm, key, value, verbose):
+ return
+
+ if cls.re_key_ns.search(key):
+ if isinstance(value, Iterable):
+ ns = cls._get_ns_list(value)
+ if ns:
+ vm.nameservers = ns
+ elif value is None:
+ vm.nameservers = []
+ else:
+ LOG.error(_("Could not evaluate nameservers from {!r}.").format(value))
+ return
+
+ if cls.re_key_searchdomain.search(key):
+ if isinstance(value, Iterable):
+ domains = cls._get_searchdomain_list(value)
+ if domains:
+ vm.searchdomains = domains
+ elif value is None:
+ vm.searchdomains = []
+ else:
+ LOG.error(_("Could not evaluate search domains from {!r}.").format(value))
+ return
+
+ if cls.re_key_dnsoptions.search(key):
+ if value is None:
+ vm.dns_options = None
+ else:
+ val = value.strip().lower()
+ if val:
+ vm.dns_options = val
+ else:
+ vm.dns_options = None
+ return
+
+ if cls.re_key_interface.search(key):
+ if vm.is_template:
+ LOG.error(_("Template definitions may not have interface definitions."))
+ return
+ if isinstance(value, Iterable):
+ for if_def in value:
+ interface = TerraformInterface.from_def(
+ if_def, appname=appname, verbose=verbose, base_dir=base_dir)
+ vm.interfaces.append(interface)
+ else:
+ LOG.error(_("Could not evaluate interfaces from {!r}.").format(value))
+ return
+
+ LOG.debug(_("Unknown VM definition key {k!r} with value: {v!r}.").format(
+ k=key, v=value))
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _apply_general_vmdef2vm(cls, vm, key, value, verbose=0):
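+ """Apply general definition keys (FQDN, vSphere, cluster, CPUs, memory, ...).
+ Return True if the key was handled here, else False."""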
+
+ if not vm.is_template and cls.re_key_fqdn.search(key):
+ vm.fqdn = value
+ return True
+
+ if key.lower() == 'vsphere' and value:
+ if verbose > 2:
+ LOG.debug(_("Applying vSphere {!r} to VM.").format(value))
+ vm.vsphere = value
+ return True
+
+ if key.lower() == 'cluster':
+ vm.cluster = value
+ return True
+
+ if key.lower() == 'num_cpus':
+ vm.num_cpus = value
+ return True
+
+ if key.lower() == 'memory':
+ vm.memory = value
+ return True
+
+ if cls.re_key_vm_folder.search(key) and value:
+ vm.folder = value
+ return True
+
+ if cls.re_key_boot_delay.search(key) and value:
+ vm.boot_delay = value
+ return True
+
+ if cls.re_key_ds_cluster.search(key) and value:
+ vm.ds_cluster = value
+ return True
+
+ if key.lower() == 'datastore' and value:
+ vm.datastore = value
+ return True
+
+ if cls.re_key_ds_type.search(key) and value:
+ vm.ds_type = value
+ return True
+
+ if cls.re_key_is_rhel.search(key) and value:
+ vm.is_rhel = value
+ return True
+
+ return False
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _apply_disk_vmdef2vm(cls, vm, key, value, verbose=0):
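+ """Apply disk related definition keys (root disk, data disks).
+ Return True if the key was handled here, else False."""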
+
+ if cls.re_key_root_disk_size.search(key):
+ vm.rootdisk_size = value
+ return True
+
+ max_disks = TerraformDisk.max_scsi_disks
+
+ LOG.debug(_("Evaluating disk data of VM {!r} ...").format(vm.name))
+
+ if cls.re_key_root_disk.search(key):
+ if isinstance(value, Mapping):
+ for (p_key, p_val) in value.items():
+ if p_key.lower() == 'size':
+ vm.rootdisk_size = p_val
+ else:
+ LOG.error(_(
+ "Could not evaluate size of root disk, {!r} is not a dictionary.").format(
+ value))
+ return True
+
+ if cls.re_key_data_disk.search(key):
+ unit_number = vm._get_disk_unit(1)
+ if isinstance(value, Mapping):
+ vm._add_data_disk(value, 'disk1', unit_number)
+ elif value is None:
+ if unit_number in vm.disks:
+ del vm.disks[unit_number]
+ else:
+ LOG.error(_("Could not evaluate data disk from {!r}.").format(value))
+ return True
+
+ if cls.re_key_data_disks.search(key):
+ if is_sequence(value):
+ current_disk = 1
+ if len(vm.disks) == 2:
+ current_disk = 2
+ total_disks = 2 + len(value)
+ else:
+ total_disks = 1 + len(value)
+
+ if total_disks > max_disks:
+ raise TerraformVmTooManyDisksError(total_disks, max_disks)
+
+ # unit_number = vm._get_disk_unit(current_disk)
+
+ for disk_data in value:
+ name = "disk{}".format(current_disk)
+ unit_number = vm._get_disk_unit(current_disk)
+ vm._add_data_disk(disk_data, name, unit_number)
+ current_disk += 1
+ elif value is None:
+ if verbose > 1:
+ LOG.debug(_("Data disks for VM {!r} were set to None.").format(vm.name))
+ else:
+ LOG.error(_("Could not evaluate data disks from {!r}.").format(value))
+ return True
+
+ LOG.debug(_("The VM {vm!r} has {nrd} disks and {nrc} SCSI controllers.").format(
+ vm=vm.name, nrd=len(vm.disks), nrc=vm.disks.get_ctrlr_count()))
+
+ return False
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _apply_puppet_vmdef2vm(cls, vm, key, value, verbose=0):
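+ """Apply puppet related definition keys.
+ Return True if the key was handled here, else False."""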
+
+ if key.lower() == 'puppet' and isinstance(value, Mapping):
+ for (p_key, p_value) in value.items():
+ cls._apply_puppetsub_vmdef2vm(
+ vm=vm, p_key=p_key, p_value=p_value, verbose=verbose)
+ return True
+
+ if cls.re_key_has_puppet.search(key):
+ vm.has_puppet = to_bool(value)
+ return True
+
+ if not hasattr(value, 'strip'):
+ if verbose > 3:
+ LOG.debug(_("Key {k!r} has no string value, but a {c!r} instead.").format(
+ k=key, c=value.__class__.__name__))
+ return False
+
+ if isinstance(value, str):
+ val_stripped = value.strip()
+ else:
+ val_stripped = str(value)
+
+ if cls.re_key_puppet_contact.search(key) and val_stripped:
+ if cls.re_invalid_chars.search(val_stripped):
+ LOG.error(_("Invalid contact name {!r}.").format(value))
+ else:
+ vm.puppet_contact = val_stripped
+ return True
+
+ if cls.re_key_puppet_customer.search(key) and val_stripped:
+ if cls.re_invalid_chars.search(val_stripped):
+ LOG.error(_("Invalid puppet customer name {!r}.").format(value))
+ else:
+ vm.puppet_customer = val_stripped
+ return True
+
+ if cls.re_key_puppet_project.search(key) and val_stripped:
+ if cls.re_invalid_chars.search(val_stripped):
+ LOG.error(_("Invalid puppet customer project name {!r}.").format(value))
+ else:
+ vm.puppet_project = val_stripped
+ return True
+
+ if cls.re_key_puppet_role.search(key) and val_stripped:
+ if cls.re_invalid_chars_role.search(val_stripped):
+ LOG.error(_("Invalid puppet role {!r}.").format(value))
+ else:
+ vm.puppet_role = val_stripped
+ return True
+
+ if cls.re_key_puppet_initial_install.search(key):
+ vm.puppet_initial_install = value
+ return True
+
+ if cls.re_key_puppet_tier.search(key) and val_stripped:
+ if cls.re_invalid_chars.search(val_stripped):
+ LOG.error(_("Invalid puppet tier {!r}.").format(value))
+ else:
+ vm.puppet_tier = val_stripped
+ return True
+
+ if cls.re_key_puppet_env.search(key) and val_stripped:
+ if verbose > 2:
+ LOG.debug(_("Setting Puppet environment to {!r}.").format(val_stripped))
+ if cls.re_invalid_chars.search(val_stripped):
+ LOG.error(_("Invalid puppet environment {!r}.").format(value))
+ else:
+ vm.puppet_env = val_stripped
+ return True
+
+ return False
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _apply_puppetsub_vmdef2vm(cls, vm, p_key, p_value, verbose=0):
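+ """Apply a single sub key of the 'puppet' block of the VM definition."""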
+
+ if isinstance(p_value, str):
+ p_value_stripped = p_value.strip()
+ else:
+ p_value_stripped = str(p_value)
+ if verbose > 2:
+ LOG.debug(_("Evaluating sub key of {d!r}: {k!r} => {v!r}").format(
+ d='puppet', k=p_key, v=p_value_stripped))
+
+ if p_key.lower() == 'contact' and p_value_stripped:
+ if cls.re_invalid_chars.search(p_value_stripped):
+ LOG.error(_("Invalid puppet contact name {!r}.").format(p_value))
+ else:
+ vm.puppet_contact = p_value_stripped
+ return
+
+ if p_key.lower() == 'customer' and p_value_stripped:
+ if cls.re_invalid_chars.search(p_value_stripped):
+ LOG.error(_("Invalid puppet customer name {!r}.").format(p_value))
+ else:
+ vm.puppet_customer = p_value_stripped
+ return
+
+ if p_key.lower() == 'project' and p_value_stripped:
+ if cls.re_invalid_chars.search(p_value_stripped):
+ LOG.error(_("Invalid puppet customer project name {!r}.").format(p_value))
+ else:
+ vm.puppet_project = p_value_stripped
+ return
+
+ if p_key.lower() == 'role' and p_value_stripped:
+ if cls.re_invalid_chars_role.search(p_value_stripped):
+ LOG.error(_("Invalid puppet role {!r}.").format(p_value))
+ else:
+ vm.puppet_role = p_value_stripped
+ return
+
+ if cls.re_key_initial_install.search(p_key):
+ vm.puppet_initial_install = p_value
+ return
+
+ if p_key.lower() == 'tier' and p_value_stripped:
+ if cls.re_invalid_chars.search(p_value_stripped):
+ LOG.error(_("Invalid puppet tier {!r}.").format(p_value))
+ else:
+ vm.puppet_tier = p_value_stripped
+ return
+
+ if cls.re_key_env.search(p_key) and p_value_stripped:
+ if verbose > 2:
+ LOG.debug(
+ _("Setting Puppet environment to {!r}.").format(p_value_stripped))
+ if cls.re_invalid_chars.search(p_value_stripped):
+ LOG.error(_("Invalid puppet environment {!r}.").format(p_value))
+ else:
+ vm.puppet_env = p_value_stripped
+
+ return
+
+ # -------------------------------------------------------------------------
+ def __copy__(self):
+
+ if self.verbose > 2:
+ n = self.name
+ if self.is_template:
+ tpl = _('Template')
+ if n is None:
+ n = tpl
+ else:
+ n += ' (' + tpl + ')'
+ LOG.debug(_("Copying Terraform VM object {!r} ...").format(n))
+
+ vm = self.__class__(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ simulate=self.simulate, force=self.force, initialized=self.initialized,
+ terminal_has_colors=self.terminal_has_colors, name=self.name,
+ is_template=self.is_template, fqdn=self.fqdn, folder=self.folder,
+ num_cpus=self.num_cpus, memory=self.memory, boot_delay=self.boot_delay,
+ cluster=self.cluster, ds_cluster=self.ds_cluster, datastore=self.datastore,
+ ds_type=self.ds_type, customer=self.customer, purpose=self.purpose,
+ vm_template=self.vm_template, puppet_contact=self.puppet_contact,
+ puppet_customer=self.puppet_customer, puppet_tier=self.puppet_tier,
+ puppet_env=self.puppet_env, puppet_role=self.puppet_role, nameservers=self.nameservers,
+ searchdomains=self.searchdomains, dns_options=self.dns_options,
+ rootdisk_size=self.rootdisk_size, has_backup=self.has_backup,
+ has_puppet=self.has_puppet, puppet_project=self.puppet_project,
+ puppet_initial_install=self.puppet_initial_install, vsphere=self.vsphere,
+ )
+
+ vm.disks = copy.copy(self.disks)
+
+ vm.interfaces = []
+ for interface in self.interfaces:
+ vm.interfaces.append(copy.copy(interface))
+
+ return vm
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _get_ns_list(cls, nameservers):
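+ """Validate the given nameserver addresses and return a list of at most
+ max_nameservers unique IP addresses."""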
+
+ if not isinstance(nameservers, Iterable):
+ raise ValueError(_("Parameter {p} {ns!r} is not iterable.").format(
+ p='nameservers', ns=nameservers))
+
+ ns = []
+ i = 1
+ for val in nameservers:
+ try:
+ address = ipaddress.ip_address(val)
+ if i > cls.max_nameservers:
+ LOG.warn(_(
+ "There are at most {mx} nameservers accepted, {addr} "
+ "will not be considered.").format(
+ mx=cls.max_nameservers, addr=address))
+ elif address not in ns:
+ ns.append(address)
+ i += 1
+ except Exception as e:
+ LOG.error(_("Invalid nameserver address {v!r}: {e}").format(
+ v=val, e=e))
+
+ return ns
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def _get_searchdomain_list(cls, searchdomains):
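+ """Return a list of at most max_searchdomains unique search domains."""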
+
+ if not isinstance(searchdomains, Iterable):
+ raise ValueError(_("Parameter {p} {ns!r} is not iterable.").format(
+ p='searchdomains', ns=searchdomains))
+
+ domains = []
+ i = 1
+ for dom in searchdomains:
+ if i > cls.max_searchdomains:
+ LOG.warn(_(
+ "There are at most {mx} search domains accepted, {srv} "
+ "will not be considered.").format(
+ mx=cls.max_searchdomains, srv=dom))
+ elif dom not in domains:
+ domains.append(dom)
+ i += 1
+
+ return domains
+
+ # -------------------------------------------------------------------------
+ @classmethod
+ def guess_rhel(cls, vm):
+ """Trying to guess, whether the VM to deploy should be a RHEL instance."""
+
+ if not vm.vm_template:
+ msg = _(
+ "The VM {!r} was no VMware template assigned, assuming the VM "
+ "should become a RHEL instance.").format(vm.fqdn)
+ LOG.warn(msg)
+ return True
+
+ ret = False
+ if cls.re_rhel_template.search(vm.vm_template):
+ ret = True
+
+ msg = _("Guessing the VM {fqdn!r} should become a RHEL instance: {ret!r}").format(
+ fqdn=vm.fqdn, ret=ret)
+ LOG.debug(msg)
+
+ return ret
+
+ # -----------------------------------------------------------
+ @property
+ def is_template(self):
+ """A flag indicating, that this is a template instance."""
+ return self._is_template
+
+ @is_template.setter
+ def is_template(self, value):
+ self._is_template = bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_initial_install(self):
+ """Set the initial_install flag for Puppet."""
+ return self._puppet_initial_install
+
+ @puppet_initial_install.setter
+ def puppet_initial_install(self, value):
+ self._puppet_initial_install = to_bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def has_backup(self):
+ """A flag indicating, that the VM should run the backup client."""
+ return self._has_backup
+
+ @has_backup.setter
+ def has_backup(self, value):
+ self._has_backup = bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def has_puppet(self):
+ """A flag indicating, that the VM should ishould be managed by puppet."""
+ return self._has_puppet
+
+ @has_puppet.setter
+ def has_puppet(self, value):
+ self._has_puppet = bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def fqdn(self):
+ """The FQDN of the VM to define. May be Non in case of template instances."""
+ return self._fqdn
+
+ @fqdn.setter
+ def fqdn(self, value):
+ if value is None:
+ self._fqdn = None
+ return
+
+ val = str(value).strip().lower()
+ if val == '':
+ self._fqdn = None
+ return
+
+ if not RE_FQDN.search(val):
+ msg = _("The hostname {!r} is no a valid FQDN.").format(value)
+ raise ValueError(msg)
+
+ if self.re_fqdn_dot_at_end.search(val):
+ msg = _("The hostname {!r} may not end with a dot '.'.").format(value)
+ raise ValueError(msg)
+
+ self._fqdn = val
+
+ # -----------------------------------------------------------
+ @property
+ def name(self):
+ """The name of the VM - if it is no template, then the FQDN."""
+ if self.is_template:
+ return self._name
+ return self._fqdn
+
+ @name.setter
+ def name(self, value):
+ if value is None:
+ if not self.is_template:
+ self._name = None
+ return
+ msg = _("The name of a template VM may not be None.")
+ raise TerraformVmDefinitionError(msg)
+
+ val = str(value).strip()
+ if val == '':
+ if not self.is_template:
+ self._name = None
+ return
+ msg = _("The name of a template VM may not be empty.")
+ raise TerraformVmDefinitionError(msg)
+
+ self._name = val
+
+ # -----------------------------------------------------------
+ @property
+ def vsphere(self):
+ """The name of the VSPhere from configuration, in which
+ the VM should be created."""
+ return self._vsphere
+
+ @vsphere.setter
+ def vsphere(self, value):
+ if value is None:
+ msg = _("The name of the VSPhere may not be None.")
+ raise TerraformVmDefinitionError(msg)
+
+ val = str(value).strip()
+ if val == '':
+ msg = _("The name of the VSPhere may not be empty.")
+ raise TerraformVmDefinitionError(msg)
+
+ self._vsphere = val
+
+ # -----------------------------------------------------------
+ @property
+ def tf_name(self):
+ """The name of the VM how used in terraform."""
+ if self.name is None:
+ return None
+ return 'vm_' + RE_TF_NAME.sub('_', self.name.lower())
+
+ # -----------------------------------------------------------
+ @property
+ def hostname(self):
+ """The base hostname of the VM (without domain)."""
+ if self._fqdn is None:
+ return None
+ return self._fqdn.split('.')[0]
+
+ # -----------------------------------------------------------
+ @property
+ def domain(self):
+ """The domain part of the host FQDN."""
+ if self._fqdn is None:
+ return None
+ return '.'.join(self._fqdn.split('.')[1:])
+
+ # -----------------------------------------------------------
+ @property
+ def num_cpus(self):
+ """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
+ return self._num_cpus
+
+ @num_cpus.setter
+ def num_cpus(self, value):
+ val = int(value)
+ if val < 1 or val > self.max_num_cpus:
+ msg = _(
+ "Invalid number of CPUs {n} - number must be "
+ "{min} <= NUMBER <= {max}.").format(n=val, min=1, max=self.max_num_cpus)
+ raise ValueError(msg)
+ self._num_cpus = val
+
+ # -----------------------------------------------------------
+ @property
+ def memory(self):
+ """Memory of the VM in MiBytes, must be a multiple of 256."""
+ return self._memory
+
+ @memory.setter
+ def memory(self, value):
+
+ value = str(value)
+ if self.verbose > 2:
+ LOG.debug(_("Trying to detect memory from value {!r}.").format(value))
+
+ match = self.re_memory_value.search(value)
+ if not match:
+ raise ValueError(_("Invalid memory {!r}.").format(value))
+ val_raw = match.group(1)
+ unit = match.group(2)
+ if unit:
+ val_raw = "{v} {u}".format(v=val_raw, u=unit)
+ else:
+ val_raw += ' MiB'
+
+ val = human2mbytes(val_raw)
+ if val < self.memory_chunk or val > self.max_memory:
+ msg = _("Invalid memory {m} - memory must be {min} <= MiBytes <= {max}.").format(
+ m=val, min=self.memory_chunk, max=self.max_memory)
+ raise ValueError(msg)
+ modulus = val % self.memory_chunk
+ if modulus:
+ msg = _("Invalid memory {m}, must be a multipe of {c}.").format(
+ m=val, c=self.memory_chunk)
+ raise ValueError(msg)
+ self._memory = val
+
+ # -----------------------------------------------------------
+ @property
+ def cluster(self):
+ """The name of the computing cluster, where the VM should be instantiated."""
+ return self._cluster
+
+ @cluster.setter
+ def cluster(self, value):
+ if value is None:
+ msg = _("The name of the computing cluster of the VM may not be None.")
+ raise TerraformVmDefinitionError(msg)
+
+ val = str(value).strip()
+ if val == '':
+ msg = _("The name of the computing cluster of the VM may not be empty.")
+ raise TerraformVmDefinitionError(msg)
+
+ self._cluster = val
+
+ # -----------------------------------------------------------
+ @property
+ def folder(self):
+ """The VM folder of the VM in VSphere."""
+ return self._folder
+
+ @folder.setter
+ def folder(self, value):
+ if value is None:
+ LOG.warn(_("A folder name may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A folder name may not be empty."))
+ return
+ self._folder = val
+
+ # -----------------------------------------------------------
+ @property
+ def boot_delay(self):
+ """Boot delay in seconds of the VM."""
+ return self._boot_delay
+
+ @boot_delay.setter
+ def boot_delay(self, value):
+ val = float(value)
+ if val < 0 or val > self.max_boot_delay:
+ msg = _(
+ "Invalid boot delay {b:0.1} - delay must be "
+ "{min} <= NUMBER <= {max}.").format(b=val, min=0, max=self.max_boot_delay)
+ raise ValueError(msg)
+ self._boot_delay = val
+
+ # -----------------------------------------------------------
+ @property
+ def ds_cluster(self):
+ """An optional defined datastore cluster."""
+ return self._ds_cluster
+
+ @ds_cluster.setter
+ def ds_cluster(self, value):
+ if value is None:
+ self._ds_cluster = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._ds_cluster = None
+ return
+ self._ds_cluster = val
+
+ # -----------------------------------------------------------
+ @property
+ def datastore(self):
+ """An optional defined datastore."""
+ return self._datastore
+
+ @datastore.setter
+ def datastore(self, value):
+ if value is None:
+ self._datastore = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._datastore = None
+ return
+ self._datastore = val
+
+ # -----------------------------------------------------------
+ @property
+ def ds_type(self):
+ """The type of the datastore (SATA,SAS or SSD).
+ Used for autoexpolring."""
+ return self._ds_type
+
+ @ds_type.setter
+ def ds_type(self, value):
+ if value is None:
+ self._ds_type = None
+ return
+ val = str(value).strip().lower()
+ if val == '':
+ self._ds_type = None
+ return
+ if val not in DS_TYPES:
+ msg = _("Datastore type {t!r} not allowed, valid datastore types are: {li}").format(
+ t=value, li=DS_TYPES)
+ raise ValueError(msg)
+ self._ds_type = val
+
+ # -----------------------------------------------------------
+ @property
+ def customer(self):
+ """The customer of the VM in VSphere."""
+ return self._customer
+
+ @customer.setter
+ def customer(self, value):
+ if value is None:
+ LOG.warn(_("A customer name may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A customer name may not be empty."))
+ return
+ self._customer = val
+
+ # -----------------------------------------------------------
+ @property
+ def owner(self):
+ """The customer of the VM in VSphere for /etc/motd."""
+ return self._customer
+
+ # -----------------------------------------------------------
+ @property
+ def purpose(self):
+ """The purpose of the VM in VSphere."""
+ return self._purpose
+
+ @purpose.setter
+ def purpose(self, value):
+ if value is None:
+ LOG.warn(_("A purpose may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A purpose may not be empty."))
+ return
+ self._purpose = val
+
+ # -----------------------------------------------------------
+ @property
+ def vm_template(self):
+ """The name of the VM or template in VSphere to use as template."""
+ return self._vm_template
+
+ @vm_template.setter
+ def vm_template(self, value):
+ if value is None:
+ LOG.warn(_("A template VM name may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A template VM name may not be empty."))
+ return
+ self._vm_template = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_contact(self):
+ """The name or address of the contact for the VM."""
+ return self._puppet_contact
+
+ @puppet_contact.setter
+ def puppet_contact(self, value):
+ if value is None:
+ LOG.warn(_("A puppet contact name may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A puppet contact name may not be empty."))
+ return
+ self._puppet_contact = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_customer(self):
+ """The name of the puppet hiera customer for the VM."""
+ return self._puppet_customer
+
+ @puppet_customer.setter
+ def puppet_customer(self, value):
+ if value is None:
+ LOG.warn(_("A puppet hiera customer name may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A puppet hiera customer name may not be empty."))
+ return
+ if '/' in val:
+ LOG.error(_("A puppet hiera customer name may not contain a slash (/) character."))
+ return
+ self._puppet_customer = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_project(self):
+ """The name of the puppet customer project for the VM."""
+ return self._puppet_project
+
+ @puppet_project.setter
+ def puppet_project(self, value):
+ if value is None:
+ LOG.warn(_("A puppet hiera project name should not be None."))
+ self._puppet_project = None
+ return
+
+ val = str(value).strip().lower()
+ if val == '':
+ LOG.warn(_("A puppet hiera customer project may not be empty."))
+ return
+ if '/' in val:
+ LOG.error(_("A puppet hiera customer project may not contain a slash (/) character."))
+ return
+ self._puppet_project = val
+
+ # -----------------------------------------------------------
+ @property
+ def hiera_customer(self):
+ """The name of the hiera customer for the VM."""
+ return self._puppet_customer
+
+ @hiera_customer.setter
+ def hiera_customer(self, value):
+ self.puppet_customer = value
+
+ # -----------------------------------------------------------
+ @property
+ def hiera_project(self):
+ """The name of the customer project for the VM."""
+ return self._puppet_project
+
+ @hiera_project.setter
+ def hiera_project(self, value):
+ self.puppet_project = value
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_role(self):
+ """The name of the puppet role for the VM."""
+ return self._puppet_role
+
+ @puppet_role.setter
+ def puppet_role(self, value):
+ if value is None:
+ LOG.warn(_("A puppet role may not be None."))
+ return
+
+ val = str(value).strip()
+ if val == '':
+ LOG.warn(_("A puppet role may not be empty."))
+ return
+ self._puppet_role = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_tier(self):
+ """The name of the puppet tier of the VM."""
+ return self._puppet_tier
+
+ @puppet_tier.setter
+ def puppet_tier(self, value):
+ if value is None:
+ LOG.warn(_("A puppet tier name may not be None."))
+ return
+
+ val = str(value).strip().lower()
+ if val == '':
+ LOG.warn(_("A puppet tier name may not be empty."))
+ return
+
+ if val not in PUPPET_TIERS:
+ LOG.warn(_("A puppet tier should be one of {li} (given: {v!r}).").format(
+ li=pp(PUPPET_TIERS), v=value))
+
+ self._puppet_tier = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_env(self):
+ """The name of the puppet environment of the VM."""
+ if self._puppet_env is not None:
+ return self._puppet_env
+ if self.is_template:
+ return None
+ return self.puppet_tier
+
+ @puppet_env.setter
+ def puppet_env(self, value):
+ if value is None:
+ return
+
+ val = str(value).strip().lower()
+ if val == '':
+ self._puppet_env = None
+ return
+
+ if val not in PUPPET_ENVIRONMENTS:
+ LOG.warn(_("A puppet environment should be one of {li} (given: {v!r}).").format(
+ li=pp(PUPPET_ENVIRONMENTS), v=value))
+
+ self._puppet_env = val
+
+ # -----------------------------------------------------------
+ @property
+ def puppet_environment(self):
+ """The name of the puppet environment of the VM."""
+ return self.puppet_env
+
+ @puppet_environment.setter
+ def puppet_environment(self, value):
+ self.puppet_env = value
+
+ # -----------------------------------------------------------
+ @property
+ def rootdisk_size(self):
+ """Size of the root disk of the VM in GiB."""
+ return self._rootdisk_size
+
+ @rootdisk_size.setter
+ def rootdisk_size(self, value):
+ val = float(value)
+ msg = _(
+ "Invalid root disk size {n} - size must be "
+ "{min} <= SIZE <= {max}.").format(
+ n=val, min=self.min_rootdisk_size, max=self.max_rootdisk_size)
+ if val < self.min_rootdisk_size or val > self.max_rootdisk_size:
+ raise ValueError(msg)
+ self._rootdisk_size = val
+
+ # -----------------------------------------------------------
+ @property
+ def already_existing(self):
+ """The Virtual machine is already existing in VSphere."""
+ return self._already_existing
+
+ @already_existing.setter
+ def already_existing(self, value):
+ self._already_existing = to_bool(value)
+
+ # -----------------------------------------------------------
+ @property
+ def is_rhel(self):
+ """A flag indicating, that the VM should ishould be managed by puppet."""
+ return self._is_rhel
+
+ @is_rhel.setter
+ def is_rhel(self, value):
+ if value is None:
+ self._is_rhel = None
+ return
+ self._is_rhel = bool(value)
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(TerraformVm, self).as_dict(short=short)
+ res['already_existing'] = self.already_existing
+ res['boot_delay'] = self.boot_delay
+ res['cluster'] = self.cluster
+ res['customer'] = self.customer
+ res['datastore'] = self.datastore
+ res['domain'] = self.domain
+ res['ds_cluster'] = self.ds_cluster
+ res['ds_type'] = self.ds_type
+ res['folder'] = self.folder
+ res['fqdn'] = self.fqdn
+ res['has_backup'] = self.has_backup
+ res['has_puppet'] = self.has_puppet
+ res['hiera_customer'] = self.hiera_customer
+ res['hiera_project'] = self.hiera_project
+ res['hostname'] = self.hostname
+ res['interfaces'] = []
+ res['is_rhel'] = self.is_rhel
+ res['is_template'] = self.is_template
+ res['memory'] = self.memory
+ res['name'] = self.name
+ res['num_cpus'] = self.num_cpus
+ res['owner'] = self.owner
+ res['puppet_contact'] = self.puppet_contact
+ res['puppet_customer'] = self.puppet_customer
+ res['puppet_project'] = self.puppet_project
+ res['puppet_env'] = self.puppet_env
+ res['puppet_environment'] = self.puppet_environment
+ res['puppet_role'] = self.puppet_role
+ res['puppet_tier'] = self.puppet_tier
+ res['puppet_initial_install'] = self.puppet_initial_install
+ res['purpose'] = self.purpose
+ res['rootdisk_size'] = self.rootdisk_size
+ res['tf_name'] = self.tf_name
+ res['vm_template'] = self.vm_template
+ res['vsphere'] = self.vsphere
+
+ for interface in self.interfaces:
+ res['interfaces'].append(interface.as_dict(short=short))
+
+ return res
+
+ # -------------------------------------------------------------------------
+ def apply_root_disk(self):
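+ """(Re-)Create the root disk entry 'disk0' with the current root disk size."""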
+
+ if self.verbose > 2:
+ LOG.debug(_("Resetting root disk."))
+
+ disk_name = 'disk0'
+
+ disk = TerraformDisk(
+ name=disk_name, root_disk=True, unit_number=0, size_gb=self.rootdisk_size,
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ initialized=True)
+
+ self.disks[disk_name] = disk
+
+ # -------------------------------------------------------------------------
+ def _add_data_disk(self, disk_def, name, unit_number=1):
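+ """Create a TerraformDisk from the given definition and add it to self.disks."""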
+
+ params = {
+ 'name': name,
+ 'unit_number': unit_number,
+ }
+
+ for key in disk_def.keys():
+ val = disk_def[key]
+ if self.re_disk_size.search(key) and val:
+ params['size_gb'] = val
+ elif self.re_disk_vgname.search(key):
+ params['vg_name'] = val
+ elif self.re_disk_lvname.search(key):
+ params['lv_name'] = val
+ elif self.re_disk_mountpoint.search(key):
+ params['mountpoint'] = val
+ elif self.re_disk_fstype.search(key) and val:
+ params['fs_type'] = val
+
+ if self.verbose > 2:
+ LOG.debug(_("Using parameters for init data disk:") + "\n" + pp(params))
+
+ params['appname'] = self.appname
+ params['verbose'] = self.verbose
+ params['base_dir'] = self.base_dir
+
+ disk = TerraformDisk(**params)
+ disk.initialized = True
+ if self.verbose > 2:
+ LOG.debug(_("Got data disk {!r}:").format(name) + "\n" + pp(disk.as_dict()))
+ self.disks[name] = disk
+
+ # -------------------------------------------------------------------------
+ def _get_disk_unit(self, current_disk_nr=0):
+ """Tries to evaluate the disk_unit my the current number in the list."""
+ disks_per_ctrlr = TerraformDisk.disks_per_scsi_ctrlr
+ max_scsi_ctrlrs = TerraformDisk.max_scsi_ctrlrs
+ max_disks = TerraformDisk.max_scsi_disks
+
+ if self.verbose > 2:
+ LOG.debug(_("Trying to get unit_id of disk number {}.").format(current_disk_nr))
+
+ if current_disk_nr >= max_disks:
+ raise TerraformVmTooManyDisksError(current_disk_nr + 1, max_disks)
+
+ ctrlr_id = current_disk_nr % max_scsi_ctrlrs
+ id_offset = current_disk_nr // max_scsi_ctrlrs
+ unit_id = ctrlr_id * disks_per_ctrlr + id_offset
+
+ if self.verbose > 1:
+ LOG.debug(_("Got unit_id {id} for disk number {nr} (controller ID {cid}).").format(
+ id=unit_id, nr=current_disk_nr, cid=ctrlr_id))
+
+ return unit_id
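+
+ # Worked example of the mapping above (illustrative: it assumes
+ # disks_per_scsi_ctrlr == 15 and max_scsi_ctrlrs == 4; the real values are
+ # class constants of TerraformDisk and may differ):
+ #
+ #     disk 0  ->  ctrlr_id 0, unit_id 0 * 15 + 0 == 0
+ #     disk 1  ->  ctrlr_id 1, unit_id 1 * 15 + 0 == 15
+ #     disk 5  ->  ctrlr_id 1, unit_id 1 * 15 + 1 == 16
+ #
+ # Consecutive data disks are therefore spread round-robin over the
+ # available SCSI controllers.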
+
+
+# =============================================================================
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A module providing functions for interactive password input.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import logging
+
+HAS_GETCH = False
+try:
+ import getch
+ HAS_GETCH = True
+except ImportError:
+ pass
+
+# Third party modules
+from fb_tools.common import to_str
+
+__version__ = '0.1.0'
+LOG = logging.getLogger(__name__)
+
+
+# =============================================================================
+def password_input_getch(prompt='', fill_char='*', max_len=64):
+ """Read a password character by character, echoing fill_char for each one."""
+ p_s = ''
+ # Blank string, used to wipe the current line before redrawing it.
+ proxy_string = ' ' * (len(prompt) + max_len)
+
+ while True:
+
+ # Wipe the line, then redraw the prompt followed by one fill character
+ # per character entered so far.
+ print('\r' + proxy_string, end='', flush=True)
+ print('\r' + prompt + fill_char * len(p_s), end='', flush=True)
+
+ # Depending on the getch implementation a key may be returned as bytes
+ # or as str, so both are accepted here.
+ c = getch.getch()
+ if c in (b'\r', b'\n', '\r', '\n'):
+ break
+ if c in (b'\x08', b'\x7f', '\x08', '\x7f'):
+ # Backspace or delete removes the last entered character.
+ if len(p_s):
+ p_s = p_s[:-1]
+ continue
+
+ p_s += to_str(c)
+ if len(p_s) >= max_len:
+ break
+
+ print('', flush=True)
+ return p_s
+
+
+# =============================================================================
+def password_input(prompt='', fill_char='*', max_len=64):
+
+ if HAS_GETCH:
+ return password_input_getch(prompt=prompt, fill_char=fill_char, max_len=max_len)
+
+ import getpass
+
+ return getpass.getpass(prompt=prompt)
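+
+# Usage sketch (the prompt text and the variable name are invented for this
+# example and are not part of the module):
+#
+#     passwd = password_input(prompt='vSphere password: ')
+#
+# With the third-party 'getch' module installed the character-wise reader
+# above is used, which echoes one fill_char per typed character; otherwise
+# the call falls back to getpass.getpass(), which echoes nothing.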
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: A module providing the configuration of a VSphere connection.
+"""
+from __future__ import absolute_import
+
+# Standard module
+import logging
+
+# Third party modules
+from fb_tools.obj import FbBaseObject
+
+from fb_tools.common import is_sequence
+
+# Own modules
+from .errors import CrTfConfigError
+
+from .xlate import XLATOR
+
+__version__ = '1.8.1'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class VsphereConfig(FbBaseObject):
+ """Class for encapsulation of config data of a connection to a VSPhere center."""
+
+ default_host = 'vcs01.ppbrln.internal'
+ default_port = 443
+ default_dc = 'vmcc'
+ default_cluster = 'vmcc-l105-01'
+ default_min_root_size_gb = 20.0
+ default_guest_id = 'centos8_64Guest'
+
+ default_template_name = 'rhel9-template'
+
+ # -------------------------------------------------------------------------
+ def __init__(
+ self, appname=None, verbose=0, version=__version__, base_dir=None, name=None,
+ host=None, port=None, user=None, password=None, dc=None, cluster=None,
+ template_name=None, min_root_size_gb=None, excluded_ds=None, guest_id=None,
+ rhsm_user=None, rhsm_password=None, initialized=False):
+
+ self._name = None
+ self._host = self.default_host
+ self._port = self.default_port
+ self._user = None
+ self._password = None
+ self._dc = self.default_dc
+ self._cluster = self.default_cluster
+ self._template_name = self.default_template_name
+ self._min_root_size_gb = self.default_min_root_size_gb
+ self._guest_id = self.default_guest_id
+ self.excluded_ds = []
+ self.used_templates = []
+
+ super(VsphereConfig, self).__init__(
+ appname=appname, verbose=verbose, version=version,
+ base_dir=base_dir, initialized=False,
+ )
+
+ if name is not None:
+ self.name = name
+ if host is not None:
+ self.host = host
+ if port is not None:
+ self.port = port
+ if user is not None:
+ self.user = user
+ if password is not None:
+ self.password = password
+ if dc is not None:
+ self.dc = dc
+ if cluster is not None:
+ self.cluster = cluster
+ if template_name is not None:
+ self.template_name = template_name
+ if min_root_size_gb is not None:
+ self.min_root_size_gb = min_root_size_gb
+ if guest_id is not None:
+ self.guest_id = guest_id
+
+ if excluded_ds:
+ if is_sequence(excluded_ds):
+ for ds in excluded_ds:
+ self.excluded_ds.append(str(ds))
+ else:
+ self.excluded_ds.append(str(excluded_ds))
+
+ if initialized:
+ self.initialized = True
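+
+ # Minimal construction sketch (all values below are invented examples, not
+ # real defaults of this class):
+ #
+ #     vsphere = VsphereConfig(
+ #         name='live', host='vcenter.example.com', user='terraform',
+ #         password='secret', dc='dc01', cluster='cluster01',
+ #         initialized=True)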
+
+ # -----------------------------------------------------------
+ @property
+ def name(self):
+ """The name of the VSphere."""
+ return self._name
+
+ @name.setter
+ def name(self, value):
+ if value is None:
+ self._name = None
+ return
+ val = str(value).strip().lower()
+ if val == '':
+ self._name = None
+ else:
+ self._name = val
+
+ # -----------------------------------------------------------
+ @property
+ def host(self):
+ """The host name or address of the VSphere server."""
+ return self._host
+
+ @host.setter
+ def host(self, value):
+ if value is None:
+ self._host = self.default_host
+ return
+ val = str(value).strip().lower()
+ if val == '':
+ self._host = None
+ else:
+ self._host = val
+
+ # -----------------------------------------------------------
+ @property
+ def port(self):
+ """The TCP port number, where the API is listening on the VSphere server."""
+ return self._port
+
+ @port.setter
+ def port(self, value):
+ if value is None:
+ self._port = self.default_port
+ return
+ val = self.default_port
+ try:
+ val = int(value)
+ if val < 1:
+ msg = _("a port may not be less than 1: {}.").format(val)
+ raise CrTfConfigError(msg)
+ max_val = (2 ** 16) - 1
+ if val > max_val:
+ msg = _("a port may not be greater than {m}: {v}.").format(
+ m=max_val, v=val)
+ raise CrTfConfigError(msg)
+ except ValueError as e:
+ msg = _("Wrong port number {v!r}: {e}").format(v=value, e=e)
+ LOG.error(msg)
+ else:
+ self._port = val
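+
+ # Behaviour sketch of the validation above (cfg stands for any instance of
+ # this class):
+ #
+ #     cfg.port = 8443     ->  port is set to 8443
+ #     cfg.port = 0        ->  raises CrTfConfigError (not a ValueError, so
+ #                             it is not caught here)
+ #     cfg.port = 'abc'    ->  the ValueError is logged, the old port is kept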
+
+ # -----------------------------------------------------------
+ @property
+ def user(self):
+ """The user name to connect to the VSphere server."""
+ return self._user
+
+ @user.setter
+ def user(self, value):
+ if value is None:
+ self._user = None
+ return
+ val = str(value).strip()
+ if val == '':
+ self._user = None
+ else:
+ self._user = val
+
+ # -----------------------------------------------------------
+ @property
+ def password(self):
+ """The password of the VSphere user."""
+ return self._password
+
+ @password.setter
+ def password(self, value):
+ if value is None:
+ self._password = None
+ return
+ val = str(value)
+ if val == '':
+ self._password = None
+ else:
+ self._password = val
+
+ # -----------------------------------------------------------
+ @property
+ def dc(self):
+ """The name of the datacenter in VSphere."""
+ return self._dc
+
+ @dc.setter
+ def dc(self, value):
+ if value is None:
+ self._dc = self.default_dc
+ return
+ val = str(value).strip()
+ if val == '':
+ self._dc = self.default_dc
+ else:
+ self._dc = val
+
+ # -----------------------------------------------------------
+ @property
+ def cluster(self):
+ """The name of the default cluster in VSphere."""
+ return self._cluster
+
+ @cluster.setter
+ def cluster(self, value):
+ if value is None:
+ self._cluster = self.default_cluster
+ return
+ val = str(value).strip()
+ if val == '':
+ self._cluster = self.default_cluster
+ else:
+ self._cluster = val
+
+ # -----------------------------------------------------------
+ @property
+ def template_name(self):
+ """The name of the default cluster in VSphere."""
+ return self._template_name
+
+ @template_name.setter
+ def template_name(self, value):
+ if value is None:
+ self._template_name = self.default_template_name
+ return
+ val = str(value).strip().lower()
+ if val == '':
+ self._template_name = self.default_template_name
+ else:
+ self._template_name = val
+
+ # -----------------------------------------------------------
+ @property
+ def min_root_size_gb(self):
+ """The minimum size of a root disk in GiB."""
+ return self._min_root_size_gb
+
+ @min_root_size_gb.setter
+ def min_root_size_gb(self, value):
+ if value is None:
+ self._min_root_size_gb = self.default_min_root_size_gb
+ return
+ val = self.default_min_root_size_gb
+ try:
+ val = float(value)
+ if val < 10:
+ msg = _("may not be less than 10: {:0.1f}.").format(val)
+ raise CrTfConfigError(msg)
+ max_val = 4 * 1024
+ if val > max_val:
+ msg = _("may not be greater than {m}: {v:0.1f}.").format(
+ m=max_val, v=val)
+ raise CrTfConfigError(msg)
+ except ValueError as e:
+ msg = _("Wrong minimum root size in GiB {v!r}: {e}").format(v=value, e=e)
+ LOG.error(msg)
+ else:
+ self._min_root_size_gb = val
+
+ # -----------------------------------------------------------
+ @property
+ def guest_id(self):
+ """The Id of the Guest OS in VSphere."""
+ return self._guest_id
+
+ @guest_id.setter
+ def guest_id(self, value):
+ if value is None:
+ self._guest_id = self.default_guest_id
+ return
+ val = str(value).strip()
+ if val == '':
+ self._guest_id = self.default_guest_id
+ else:
+ self._guest_id = val
+
+ # -------------------------------------------------------------------------
+ def as_dict(self, short=True, show_secrets=False):
+ """
+ Transforms the elements of the object into a dict
+
+ @param short: don't include local properties in resulting dict.
+ @type short: bool
+
+ @return: structure as dict
+ @rtype: dict
+ """
+
+ res = super(VsphereConfig, self).as_dict(short=short)
+
+ res['name'] = self.name
+ res['host'] = self.host
+ res['port'] = self.port
+ res['user'] = self.user
+ res['dc'] = self.dc
+ res['cluster'] = self.cluster
+ res['template_name'] = self.template_name
+ res['min_root_size_gb'] = self.min_root_size_gb
+ res['guest_id'] = self.guest_id
+
+ if self.password:
+ if show_secrets or self.verbose > 4:
+ res['password'] = self.password
+ else:
+ res['password'] = '*******'
+ else:
+ res['password'] = None
+
+ return res
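+
+ # Illustrative behaviour of the password handling (the values are made up):
+ #
+ #     cfg = VsphereConfig(name='test', password='secret')
+ #     cfg.as_dict()['password']                    ->  '*******'
+ #     cfg.as_dict(show_secrets=True)['password']   ->  'secret'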
+
+ # -------------------------------------------------------------------------
+ def __copy__(self):
+
+ vsphere = self.__class__(
+ appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+ initialized=self.initialized, host=self.host, port=self.port, user=self.user,
+ password=self.password, dc=self.dc, cluster=self.cluster,
+ template_name=self.template_name, excluded_ds=self.excluded_ds,
+ min_root_size_gb=self.min_root_size_gb, guest_id=self.guest_id,
+ )
+ return vsphere
+
+ # -------------------------------------------------------------------------
+ def __eq__(self, other):
+
+ if self.verbose > 4:
+ LOG.debug(_("Comparing {} objects ...").format(self.__class__.__name__))
+
+ if not isinstance(other, VsphereConfig):
+ return False
+
+ if self.name != other.name:
+ return False
+ if self.host != other.host:
+ return False
+ if self.port != other.port:
+ return False
+ if self.user != other.user:
+ return False
+ if self.password != other.password:
+ return False
+ if self.dc != other.dc:
+ return False
+ if self.cluster != other.cluster:
+ return False
+ if self.template_name != other.template_name:
+ return False
+ if self.min_root_size_gb != other.min_root_size_gb:
+ return False
+ if self.guest_id != other.guest_id:
+ return False
+ if self.excluded_ds != other.excluded_ds:
+ return False
+
+ return True
+
+ # -------------------------------------------------------------------------
+ def is_valid(self, raise_on_error=False):
+
+ name = '<{}>'.format(_('unknown'))
+ if self.name:
+ name = self.name
+ if self.verbose > 1:
+ LOG.debug(_("Checking validity of {o}-object {n!r} ...").format(
+ o=self.__class__.__name__, n=name))
+
+ error_lst = []
+
+ mandatory_attribs = ('name', 'host', 'dc', 'cluster')
+ requested_attribs = ('user', 'password')
+
+ for attrib in mandatory_attribs:
+ cur_val = getattr(self, attrib, None)
+ if not cur_val:
+ msg = _("Attribute {a!r} of the {o}-object {n!r} is not set.").format(
+ a=attrib, o=self.__class__.__name__, n=name)
+ error_lst.append(msg)
+ if not raise_on_error:
+ LOG.error(msg)
+
+ if error_lst:
+ if raise_on_error:
+ nr = len(error_lst)
+ msg = ngettext(
+ 'Found an error in the VSphere configuration',
+ 'Found {} errors in the VSphere configuration', nr)
+ msg = msg.format(nr) + '\n * ' + '\n * '.join(error_lst)
+ raise CrTfConfigError(msg)
+ return False
+
+ for attrib in requested_attribs:
+ cur_val = getattr(self, attrib, None)
+ if not cur_val:
+ msg = _(
+ "Attribute {a!r} of the {o}-object {n!r} is not set, it "
+ "will be requested interactively by this script and when starting terraform.").format(
+ a=attrib, o=self.__class__.__name__, n=name)
+ LOG.warning(msg)
+
+ return True
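+
+ # Usage sketch (the host name is invented): a configuration without a name
+ # fails the check of the mandatory attributes:
+ #
+ #     cfg = VsphereConfig(host='vcenter.example.com')
+ #     cfg.is_valid()                      ->  False, the errors are logged
+ #     cfg.is_valid(raise_on_error=True)   ->  raises CrTfConfigError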
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2024 by Frank Brehm, Berlin
+@summary: The module for i18n.
+ It provides a translation object, usable from all other
+ modules in this package.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard modules
+import logging
+import gettext
+
+from pathlib import Path
+
+# Third party modules
+from babel.support import Translations
+
+DOMAIN = 'create_terraform'
+
+LOG = logging.getLogger(__name__)
+
+__version__ = '1.0.3'
+
+__me__ = Path(__file__).resolve()
+__module_dir__ = __me__.parent
+__lib_dir__ = __module_dir__.parent
+__base_dir__ = __lib_dir__.parent
+LOCALE_DIR = __base_dir__.joinpath('locale')
+if not LOCALE_DIR.is_dir():
+ LOCALE_DIR = __module_dir__.joinpath('locale')
+ if not LOCALE_DIR.is_dir():
+ LOCALE_DIR = None
+
+__mo_file__ = None
+if LOCALE_DIR:
+ __mo_file__ = gettext.find(DOMAIN, str(LOCALE_DIR))
+if __mo_file__:
+ try:
+ with open(__mo_file__, 'rb') as F:
+ XLATOR = Translations(F, DOMAIN)
+ except FileNotFoundError:
+ XLATOR = gettext.NullTranslations()
+else:
+ XLATOR = gettext.NullTranslations()
+
+_ = XLATOR.gettext
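+
+# Resolution sketch: if a compiled message catalogue such as
+# locale/<lang>/LC_MESSAGES/create_terraform.mo exists below the base or the
+# module directory (the exact layout depends on how the package is installed),
+# XLATOR provides translated messages. Otherwise the NullTranslations fallback
+# is used and _('some message') simply returns the message ID unchanged.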
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+ print(_("Module directory: {!r}").format(__module_dir__))
+ print(_("Base directory: {!r}").format(__base_dir__))
+ print(_("Locale directory: {!r}").format(LOCALE_DIR))
+ print(_("Locale domain: {!r}").format(DOMAIN))
+ print(_("Found .mo-file: {!r}").format(__mo_file__))
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4