Frank Brehm's Git Trees - pixelpark/create-terraform.git/commitdiff
Adding Python modules
author     Frank Brehm <frank.brehm@pixelpark.com>
           Tue, 24 Sep 2019 14:41:42 +0000 (16:41 +0200)
committer  Frank Brehm <frank.brehm@pixelpark.com>
           Tue, 24 Sep 2019 14:41:42 +0000 (16:41 +0200)
lib/cr_tf/__init__.py [new file with mode: 0644]
lib/cr_tf/app.py [new file with mode: 0644]
lib/cr_tf/config.py [new file with mode: 0644]
lib/cr_tf/errors.py [new file with mode: 0644]
lib/cr_tf/handler.py [new file with mode: 0644]
lib/cr_tf/terraform/__init__.py [new file with mode: 0644]
lib/cr_tf/terraform/disk.py [new file with mode: 0644]
lib/cr_tf/terraform/interface.py [new file with mode: 0644]
lib/cr_tf/terraform/vm.py [new file with mode: 0644]
lib/cr_tf/xlate.py [new file with mode: 0644]

diff --git a/lib/cr_tf/__init__.py b/lib/cr_tf/__init__.py
new file mode 100644 (file)
index 0000000..91409dd
--- /dev/null
@@ -0,0 +1,11 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+__version__ = '1.3.0'
+
+MIN_VERSION_TERRAFORM = '0.12.0'
+MAX_VERSION_TERRAFORM = '0.12.99'
+
+MIN_VERSION_VSPHERE_PROVIDER = '1.11.0'
+
+# vim: ts=4 et list
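
The two TERRAFORM version constants above bound the terraform releases the tool accepts; handler.py further down turns them into LooseVersion objects and compares them with the output of `terraform version`. A minimal sketch of such a check, not part of the commit and with a made-up installed version string:

from distutils.version import LooseVersion

MIN_VERSION_TERRAFORM = '0.12.0'
MAX_VERSION_TERRAFORM = '0.12.99'

installed = LooseVersion('0.12.13')        # hypothetical `terraform version` output
if not (LooseVersion(MIN_VERSION_TERRAFORM) <= installed
        <= LooseVersion(MAX_VERSION_TERRAFORM)):
    raise SystemExit("Unsupported terraform version: {}".format(installed))
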
diff --git a/lib/cr_tf/app.py b/lib/cr_tf/app.py
new file mode 100644 (file)
index 0000000..358ef23
--- /dev/null
@@ -0,0 +1,498 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2019 by Frank Brehm, Berlin
+@summary: The module for the application object.
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import sys
+import os
+import logging
+import re
+import argparse
+import signal
+
+from pathlib import Path
+
+# Third party modules
+
+# Own modules
+from . import __version__ as __pkg_version__
+
+from fb_tools.common import pp
+
+from fb_tools.app import BaseApplication
+
+from fb_tools.errors import ExpectedHandlerError, CommandNotFoundError
+
+from fb_tools.config import ConfigError
+
+from fb_tools.common import generate_password
+
+from .config import CrTfConfiguration
+
+from .handler import CreateTerraformHandler
+
+from .xlate import __module_dir__ as __xlate_module_dir__
+from .xlate import __base_dir__ as __xlate_base_dir__
+from .xlate import __mo_file__ as __xlate_mo_file__
+from .xlate import XLATOR, LOCALE_DIR, DOMAIN
+
+__version__ = '1.0.4'
+LOG = logging.getLogger(__name__)
+
+SIGNAL_NAMES = {
+    signal.SIGHUP: 'HUP',
+    signal.SIGINT: 'INT',
+    signal.SIGABRT: 'ABRT',
+    signal.SIGTERM: 'TERM',
+    signal.SIGKILL: 'KILL',
+    signal.SIGQUIT: 'QUIT',
+    signal.SIGUSR1: 'USR1',
+    signal.SIGUSR2: 'USR2',
+}
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CfgFileOptionAction(argparse.Action):
+
+    # -------------------------------------------------------------------------
+    def __init__(self, option_strings, *args, **kwargs):
+
+        super(CfgFileOptionAction, self).__init__(
+            option_strings=option_strings, *args, **kwargs)
+
+    # -------------------------------------------------------------------------
+    def __call__(self, parser, namespace, values, option_string=None):
+
+        if values is None:
+            setattr(namespace, self.dest, None)
+            return
+
+        path = Path(values)
+        if not path.exists():
+            msg = _("File {!r} does not exists.").format(values)
+            raise argparse.ArgumentError(self, msg)
+        if not path.is_file():
+            msg = _("File {!r} is not a regular file.").format(values)
+            raise argparse.ArgumentError(self, msg)
+
+        setattr(namespace, self.dest, path.resolve())
+
+
+# =============================================================================
+class YamlFileOptionAction(argparse.Action):
+
+    # -------------------------------------------------------------------------
+    def __init__(self, option_strings, *args, **kwargs):
+
+        super(YamlFileOptionAction, self).__init__(
+            option_strings=option_strings, *args, **kwargs)
+
+    # -------------------------------------------------------------------------
+    def __call__(self, parser, namespace, values, option_string=None):
+
+        yaml_file_paths = []
+
+        for value in values:
+            path = Path(value)
+            if not path.exists():
+                msg = _("File {!r} does not exists.").format(values)
+                raise argparse.ArgumentError(self, msg)
+            if not path.is_file():
+                msg = _("File {!r} is not a regular file.").format(values)
+                raise argparse.ArgumentError(self, msg)
+            yaml_file_paths.append(path.resolve())
+
+        setattr(namespace, self.dest, yaml_file_paths)
+
+
+# =============================================================================
+class StopStepOptionAction(argparse.Action):
+
+    # -------------------------------------------------------------------------
+    def __init__(self, option_strings, *args, **kwargs):
+
+        super(StopStepOptionAction, self).__init__(
+            option_strings=option_strings, *args, **kwargs)
+
+    # -------------------------------------------------------------------------
+    def __call__(self, parser, namespace, values, option_string=None):
+
+        step = values
+        if step == '?':
+            width = 1
+            for step in CreateTerraformHandler.steps:
+                if len(step) > width:
+                    width = len(step)
+
+            print("\n" + _("The following steps to interrupt the execution after are available:"))
+
+            for step in CreateTerraformHandler.steps:
+                desc = _('<no description>')
+                if step in CreateTerraformHandler.step_desc:
+                    desc = CreateTerraformHandler.step_desc[step]
+                line = ' * {step:<{width}} {desc}'.format(
+                    step=step, width=width, desc=desc)
+                print(line)
+
+            print()
+            sys.exit(0)
+
+        setattr(namespace, self.dest, step)
+
+
+# =============================================================================
+class CrTfApplication(BaseApplication):
+    """
+    Class for the application objects.
+    """
+
+    re_prefix = re.compile(r'^[a-z0-9][a-z0-9_]*$', re.IGNORECASE)
+    re_anum = re.compile(r'[^A-Z0-9_]+', re.IGNORECASE)
+    fake_root_passwd = generate_password(12)
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__pkg_version__, base_dir=None,
+            terminal_has_colors=False, initialized=False, usage=None, description=None,
+            argparse_epilog=None, argparse_prefix_chars='-', env_prefix=None):
+
+        self.yaml_file = None
+        self.config = None
+        self.handler = None
+        self._cfg_file = None
+
+        desc = _(
+            "Creates or updates a directory with a terraform environment "
+            "on base of a given YAML file.")
+
+        super(CrTfApplication, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            description=desc, terminal_has_colors=terminal_has_colors, initialized=False,
+        )
+
+    # -------------------------------------------------------------------------
+    @property
+    def cfg_file(self):
+        """Configuration file."""
+        return self._cfg_file
+
+    # -------------------------------------------------------------------------
+    def post_init(self):
+        """
+        Method to execute before calling run(). Here could be done some
+        finishing actions after reading in commandline parameters,
+        configuration a.s.o.
+
+        This method could be overwritten by descendant classes, these
+        methhods should allways include a call to post_init() of the
+        parent class.
+
+        """
+
+        self.initialized = False
+
+        self.init_logging()
+
+        self.perform_arg_parser()
+
+        self.config = CrTfConfiguration(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            config_file=self.cfg_file)
+
+        try:
+            self.config.read()
+        except ConfigError as e:
+            LOG.error(_("Error in configuration:") + " " + str(e))
+            self.exit(1)
+        if self.config.verbose > self.verbose:
+            self.verbose = self.config.verbose
+        if self.config.simulate:
+            self.simulate = True
+        self.config.initialized = True
+
+        if self.verbose > 3:
+            LOG.debug(_("Read configuration:") + '\n' + pp(self.config.as_dict()))
+
+        self.perform_arg_parser_rest()
+
+        if not self.config.vsphere_password:
+            self.config.vsphere_password = self.get_secret(
+                prompt=_('Password for vSphere host {h!r} and user {u!r}').format(
+                    h=self.config.vsphere_host, u=self.config.vsphere_user),
+                item_name=_('vSphere password'))
+
+        if not self.config.pdns_api_key:
+            url = 'http'
+            if self.config.pdns_api_use_https:
+                url += 's'
+            url += '://' + self.config.pdns_master_server
+            url += ':{}'.format(self.config.pdns_api_port)
+            if self.config.pdns_api_path_prefix:
+                url += self.config.pdns_api_path_prefix
+            prompt = _('PowerDNS API key for {!r}').format(url)
+            self.config.pdns_api_key = self.get_secret(
+                prompt=prompt, item_name=_('PowerDNS API key'))
+
+        if not self.config.vm_root_password:
+            # Using a fake root password, because it is currently not used.
+            # TODO: Once the root password is really used, replace the fake
+            #       password by prompting for the real root password.
+            LOG.debug(_(
+                "Using fake root password {!r} - "
+                "but it is currently not used.").format(self.fake_root_passwd))
+            self.config.vm_root_password = self.fake_root_passwd
+#            self.config.vm_root_password = self.get_secret(
+#                prompt=_('root password for generated VMs'), item_name=_('root password'))
+
+        self.handler = CreateTerraformHandler(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            simulate=self.simulate, force=self.force, config=self.config,
+            terminal_has_colors=self.terminal_has_colors)
+
+        if self.args.stop_after:
+            self.handler.stop_at_step = self.args.stop_after
+
+        self.handler.set_tz(self.config.tz_name)
+
+        try:
+            self.handler.init_handlers()
+        except (CommandNotFoundError, ExpectedHandlerError) as e:
+            LOG.error(str(e))
+            self.exit(5)
+        self.handler.initialized = True
+
+        self.initialized = True
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(CrTfApplication, self).as_dict(short=short)
+
+        res['cfg_file'] = self.cfg_file
+        res['__pkg_version__'] = __pkg_version__
+        res['config'] = None
+        if self.config:
+            res['config'] = self.config.as_dict(short=short, show_secrets=self.force)
+
+        if 'xlate' not in res:
+            res['xlate'] = {}
+        res['xlate'][DOMAIN] = {
+            '__module_dir__': __xlate_module_dir__,
+            '__base_dir__': __xlate_base_dir__,
+            'LOCALE_DIR': LOCALE_DIR,
+            'DOMAIN': DOMAIN,
+            '__mo_file__': __xlate_mo_file__,
+        }
+
+        return res
+
+    # -------------------------------------------------------------------------
+    def init_arg_parser(self):
+        """
+        Publicly available method to initialize the argument parser.
+        """
+
+        super(CrTfApplication, self).init_arg_parser()
+
+        cur_dir = Path(os.getcwd())
+
+        default_cfg_file = self.base_dir.joinpath('etc').joinpath(self.appname + '.ini')
+        default_cfg_file_rel = Path(os.path.relpath(str(default_cfg_file), str(cur_dir)))
+
+        tf_dir = Path(CrTfConfiguration.default_terraform_dir)
+        if not tf_dir.is_absolute():
+            tf_dir = self.base_dir.joinpath(tf_dir).resolve()
+        tf_dir_rel = Path(os.path.relpath(str(tf_dir), str(cur_dir)))
+
+        self.arg_parser.add_argument(
+            '-D', '--dir', '--directory', '--terraform_directory',
+            metavar=_("DIR"), dest="terraform_dir",
+            help=_(
+                "The directory, where the terraform project directories should be created. "
+                "Default: {!r}").format(str(tf_dir_rel))
+        )
+
+        steps = list(CreateTerraformHandler.steps[:]) + ['?']
+
+        self.arg_parser.add_argument(
+            '-S', '--stop-after', metavar=_('STEP'), dest='stop_after', choices=steps,
+            action=StopStepOptionAction,
+            help=_(
+                "Name of the step, where to interrupt the execution of this script. "
+                "Use {!r} to show a list of all avaliable steps.").format('--stop-after ?')
+        )
+
+        self.arg_parser.add_argument(
+            '-c', '--config', '--config-file', dest='cfg_file', metavar=_('FILE'),
+            action=CfgFileOptionAction,
+            help=_("Configuration file (default: {!r})").format(str(default_cfg_file_rel))
+        )
+
+        # vSphere options
+        vmware_group = self.arg_parser.add_argument_group(_('vSphere options'))
+
+        vmware_group.add_argument(
+            '-H', '--host', dest='vsphere_host', metavar=_("HOST"),
+            help=_("Remote vSphere host to connect to (Default: {!r}).").format(
+                CrTfConfiguration.default_vsphere_host)
+        )
+
+        vmware_group.add_argument(
+            '-p', '--port', dest='vsphere_port', type=int, metavar=_("PORT"),
+            help=_("Port on vSphere host to connect on (Default: {}).").format(
+                CrTfConfiguration.default_vsphere_port)
+        )
+
+        vmware_group.add_argument(
+            '-U', '--user', dest='vsphere_user', metavar=_("USER"),
+            help=_("User name to use when connecting to vSphere host (Default: {!r}).").format(
+                CrTfConfiguration.default_vsphere_user)
+        )
+
+        vmware_group.add_argument(
+            '-P', '--password', dest='vsphere_password', metavar=_("PASSWORD"),
+            help=_("Password to use when connecting to vSphere host."),
+        )
+
+        # PowerDNS options
+        pdns_group = self.arg_parser.add_argument_group(_('PowerDNS options'))
+
+        pdns_group.add_argument(
+            '-M', '--pdns-master', metavar=_("HOST"), dest='pdns_master',
+            help=_(
+                "The hostname or address of the PowerDNS master server "
+                "(Default: {!r}).").format(CrTfConfiguration.default_pdns_master_server)
+        )
+
+        pdns_group.add_argument(
+            '--api-port', metavar=_("PORT"), type=int, dest="pdns_api_port",
+            help=_("The port number of the PowerDNS API (Default: {}).").format(
+                CrTfConfiguration.default_pdns_api_port)
+        )
+
+        pdns_group.add_argument(
+            '--api-key', metavar=_("KEY"), dest="pdns_api_key",
+            help=_("The key accessing to the PDNS API.")
+        )
+
+        pdns_group.add_argument(
+            '--api-https', action="store_true", dest="pdns_api_https",
+            help=_("Should PDNS API requests executed per HTTPS?"),
+        )
+
+        pdns_group.add_argument(
+            '--api-prefix', metavar=_("PATH"), dest='pdns_api_prefix',
+            help=_("The path prefix in the URL for PDNS API requests (Default: {!r}).").format(
+                CrTfConfiguration.default_pdns_api_path_prefix)
+        )
+
+        # Positional arguments
+        self.arg_parser.add_argument(
+            "yaml_file", nargs=1, metavar=_("YAML_FILE"), action=YamlFileOptionAction,
+            help=_("The YAML-file with the definition of the VMs to create with terraform."),
+        )
+
+    # -------------------------------------------------------------------------
+    def perform_arg_parser(self):
+
+        if self.args.cfg_file:
+            self._cfg_file = Path(self.args.cfg_file)
+            if not self.cfg_file.is_absolute():
+                self._cfg_file = self.cfg_file.resolve()
+
+    # -------------------------------------------------------------------------
+    def perform_arg_parser_rest(self):
+        """
+        Publicly available method to execute some actions after parsing
+        the command line parameters.
+        """
+
+        self.perform_arg_parser_vmware()
+        self.perform_arg_parser_pdns()
+
+        self.yaml_file = Path(self.args.yaml_file[0])
+        if not self.yaml_file.is_absolute():
+            self.yaml_file = self.yaml_file.resolve()
+
+    # -------------------------------------------------------------------------
+    def perform_arg_parser_vmware(self):
+
+        if self.args.vsphere_host:
+            self.config.vsphere_host = self.args.vsphere_host
+        if self.args.vsphere_port:
+            self.config.vsphere_port = self.args.vsphere_port
+        if self.args.vsphere_user:
+            self.config.vsphere_user = self.args.vsphere_user
+        if self.args.vsphere_password:
+            self.config.vsphere_password = self.args.vsphere_password
+
+    # -------------------------------------------------------------------------
+    def perform_arg_parser_pdns(self):
+
+        if self.args.pdns_master:
+            self.config.pdns_master_server = self.args.pdns_master
+        if self.args.pdns_api_port:
+            self.config.pdns_api_port = self.args.pdns_api_port
+        if self.args.pdns_api_key:
+            self.config.pdns_api_key = self.args.pdns_api_key
+        if self.args.pdns_api_https:
+            self.config.pdns_api_use_https = True
+        if self.args.pdns_api_prefix:
+            self.config.pdns_api_path_prefix = self.args.pdns_api_prefix
+
+    # -------------------------------------------------------------------------
+    def _run(self):
+        """Main routine."""
+
+        LOG.info(_("Starting {a!r}, version {v!r} ...").format(
+            a=self.appname, v=__pkg_version__))
+
+        try:
+            self.handler(self.yaml_file)
+        except ExpectedHandlerError as e:
+            self.handler = None
+            self.handle_error(str(e), _("Create Terraform environment"))
+            self.exit(5)
+
+    # -------------------------------------------------------------------------
+    def post_run(self):
+        """
+        Dummy function to run after the main routine.
+        Could be overridden by descendant classes.
+
+        """
+
+        if self.verbose > 1:
+            LOG.info(_("Executing {} ...").format('post_run()'))
+
+        if self.handler:
+            self.handler = None
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
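
CfgFileOptionAction, YamlFileOptionAction and StopStepOptionAction above all follow the same argparse pattern: the Action's __call__() validates the raw value and stores a normalised result on the namespace. The following self-contained sketch shows that pattern; the parser wiring, option name and temporary file are invented for illustration and are not part of the commit:

import argparse
import tempfile
from pathlib import Path


class ExistingFileAction(argparse.Action):
    """Store the resolved path of an existing regular file on the namespace."""

    def __call__(self, parser, namespace, values, option_string=None):
        path = Path(values)
        if not path.is_file():
            raise argparse.ArgumentError(
                self, "File {!r} is not a regular file.".format(values))
        setattr(namespace, self.dest, path.resolve())


parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', dest='cfg_file', action=ExistingFileAction)

with tempfile.NamedTemporaryFile(suffix='.ini') as tmp:
    args = parser.parse_args(['-c', tmp.name])
    print(args.cfg_file)        # the resolved absolute path of the temporary file
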
diff --git a/lib/cr_tf/config.py b/lib/cr_tf/config.py
new file mode 100644 (file)
index 0000000..0684846
--- /dev/null
@@ -0,0 +1,614 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2019 by Frank Brehm, Berlin
+@summary: A module for providing a configuration
+"""
+from __future__ import absolute_import
+
+# Standard module
+import logging
+import re
+
+from pathlib import Path
+
+# Third party modules
+import pytz
+
+# Own modules
+from fb_tools.config import ConfigError, BaseConfiguration
+
+from fb_tools.common import to_bool, RE_FQDN
+
+from fb_tools.pdns import DEFAULT_PORT as DEFAULT_PDNS_API_PORT
+from fb_tools.pdns import DEFAULT_TIMEOUT as DEFAULT_PDNS_API_TIMEOUT               # noqa
+from fb_tools.pdns import DEFAULT_API_PREFIX as DEFAULT_PDNS_API_PREFIX             # noqa
+from fb_tools.pdns import DEFAULT_USE_HTTPS as DEFAULT_PDNS_API_USE_HTTPS
+
+from .xlate import XLATOR
+
+__version__ = '1.1.5'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class CrTfConfigError(ConfigError):
+    """Base error class for all exceptions happened during
+    execution this configured application"""
+
+    pass
+
+
+# =============================================================================
+class CrTfConfiguration(BaseConfiguration):
+    """
+    A class for providing a configuration for the CrTfApplication class
+    and methods to read it from configuration files.
+    """
+
+    default_vsphere_host = 'vcs01.ppbrln.internal'
+    default_vsphere_port = 443
+    default_vsphere_user = 'Administrator@vsphere.local'
+    default_vsphere_dc = 'vmcc'
+    default_vsphere_cluster = 'vmcc-l105-01'
+    default_template_name = 'oracle-linux-7-template'
+    default_pdns_master_server = 'master.pp-dns.com'
+    default_pdns_api_port = DEFAULT_PDNS_API_PORT
+    default_pdns_api_use_https = bool(DEFAULT_PDNS_API_USE_HTTPS)
+    default_pdns_api_path_prefix = DEFAULT_PDNS_API_PREFIX
+    default_pdns_api_timeout = DEFAULT_PDNS_API_TIMEOUT
+    default_pdns_comment_account = 'provisioning'
+    default_min_root_size_gb = 32.0
+    default_terraform_dir = Path('terraform')
+    default_tz_name = 'Europe/Berlin'
+    default_guest_id = "oracleLinux7_64Guest"
+
+    default_disk_size = 10.0
+    default_root_min_size = 10.0
+    default_root_max_size = 512.0
+    default_disk_min_size = 4.0
+    default_disk_max_size = 1024.0
+
+    default_tf_backend_host = 'terraform.pixelpark.com'
+    default_tf_backend_scheme = 'https'
+    default_tf_backend_path_prefix = 'terraform'
+
+    default_puppetmaster = 'puppetmaster01.pixelpark.com'
+    default_puppetca = 'puppetca01.pixelpark.com'
+
+    msg_invalid_type = _("Invalid value {v!r} for {n!r} configuration ({f!r}:[{s}]): {e}")
+    msg_val_negative = _(
+        "Invalid value {v} for {n!r} configuration ({f!r}:[{s}]): "
+        "must be equal or greater than zero.")
+
+    max_pdns_api_timeout = 3600
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None, simulate=False,
+            encoding=None, config_dir=None, config_file=None, initialized=False):
+
+        self.vsphere_host = self.default_vsphere_host
+        self.vsphere_port = self.default_vsphere_port
+        self.vsphere_user = self.default_vsphere_user
+        self._vsphere_password = None
+        self.vsphere_dc = self.default_vsphere_dc
+        self.vsphere_cluster = self.default_vsphere_cluster
+        self.template_name = self.default_template_name
+        self.pdns_master_server = self.default_pdns_master_server
+        self.pdns_api_port = self.default_pdns_api_port
+        self._pdns_api_key = None
+        self._pdns_api_use_https = self.default_pdns_api_use_https
+        self._pdns_api_timeout = self.default_pdns_api_timeout
+        self.pdns_api_path_prefix = self.default_pdns_api_path_prefix
+        self.min_root_size_gb = self.default_min_root_size_gb
+        self._relative_tf_dir = True
+        self._terraform_dir = self.default_terraform_dir
+        self._vm_root_password = None
+        self.tz_name = self.default_tz_name
+        self.guest_id = self.default_guest_id
+        self.puppetmaster = self.default_puppetmaster
+        self.puppetca = self.default_puppetca
+        self.pdns_comment_account = self.default_pdns_comment_account
+
+        self._disk_size = self.default_disk_size
+
+        self._root_min_size = self.default_root_min_size
+        self._root_max_size = self.default_root_max_size
+        self._disk_min_size = self.default_disk_min_size
+        self._disk_max_size = self.default_disk_max_size
+
+        self.tf_backend_host = self.default_tf_backend_host
+        self.tf_backend_scheme = self.default_tf_backend_scheme
+        self.tf_backend_path_prefix = self.default_tf_backend_path_prefix
+
+        self._simulate = False
+
+        self.excluded_datastores = []
+
+        super(CrTfConfiguration, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            encoding=encoding, config_dir=config_dir, config_file=config_file, initialized=False,
+        )
+
+        self.terraform_dir = self.default_terraform_dir
+
+        self.simulate = simulate
+
+        if initialized:
+            self.initialized = True
+
+    # -----------------------------------------------------------
+    @property
+    def simulate(self):
+        """A flag describing, that all should be simulated."""
+        return self._simulate
+
+    @simulate.setter
+    def simulate(self, value):
+        self._simulate = to_bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def vsphere_password(self):
+        """The password of the VSphere user."""
+        return self._vsphere_password
+
+    @vsphere_password.setter
+    def vsphere_password(self, value):
+        if value is None:
+            self._vsphere_password = None
+            return
+        val = str(value)
+        if val == '':
+            self._vsphere_password = None
+        else:
+            self._vsphere_password = val
+
+    # -----------------------------------------------------------
+    @property
+    def pdns_api_key(self):
+        """The key used to authenticate against the PowerDNS API."""
+        return self._pdns_api_key
+
+    @pdns_api_key.setter
+    def pdns_api_key(self, value):
+        if value is None:
+            self._pdns_api_key = None
+            return
+        val = str(value)
+        if val == '':
+            self._pdns_api_key = None
+        else:
+            self._pdns_api_key = val
+
+    # -----------------------------------------------------------
+    @property
+    def pdns_api_use_https(self):
+        "Should HTTPS used for PDNS API calls."
+        return self._pdns_api_use_https
+
+    @pdns_api_use_https.setter
+    def pdns_api_use_https(self, value):
+        self._pdns_api_use_https = to_bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def pdns_api_timeout(self):
+        """The timeout in seconds for requesting the PowerDNS API."""
+        return self._pdns_api_timeout
+
+    @pdns_api_timeout.setter
+    def pdns_api_timeout(self, value):
+        if value is None:
+            self._pdns_api_timeout = self.default_pdns_api_timeout
+            return
+        val = int(value)
+        err_msg = _(
+            "Invalid timeout {t!r} for requesting the PowerDNS API, "
+            "must be 0 < SECONDS < {m}.")
+        if val <= 0 or val > self.max_pdns_api_timeout:
+            msg = err_msg.format(t=value, m=self.max_pdns_api_timeout)
+            raise ValueError(msg)
+        self._pdns_api_timeout = val
+
+    # -----------------------------------------------------------
+    @property
+    def relative_tf_dir(self):
+        """Use a project directory relative to the definition YAML file instead
+            of the standard terraform directory.
+            This way it is possible to use project directories
+            outside this GIT repository."""
+        return self._relative_tf_dir
+
+    @relative_tf_dir.setter
+    def relative_tf_dir(self, value):
+        self._relative_tf_dir = to_bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def terraform_dir(self):
+        """The directory, where the terraform project directories should be created.
+            It is not used, if relative_tf_dir was set to True (the default)."""
+        return self._terraform_dir
+
+    @terraform_dir.setter
+    def terraform_dir(self, value):
+
+        if not value:
+            return
+
+        val = Path(str(value))
+        if val.is_absolute():
+            self._terraform_dir = val.resolve()
+            return
+
+        self._terraform_dir = self.base_dir.joinpath(val).resolve()
+
+    # -----------------------------------------------------------
+    @property
+    def vm_root_password(self):
+        """The password of the VSphere user."""
+        return self._vm_root_password
+
+    @vm_root_password.setter
+    def vm_root_password(self, value):
+        if value is None:
+            self._vm_root_password = None
+            return
+        val = str(value)
+        if val == '':
+            self._vm_root_password = None
+        else:
+            self._vm_root_password = val
+
+    # -----------------------------------------------------------
+    @property
+    def disk_size(self):
+        """Default data disk size in GiB."""
+        return self._disk_size
+
+    @disk_size.setter
+    def disk_size(self, value):
+        if value is None:
+            msg = _("The default size of the data disk may not be None.")
+            raise TypeError(msg)
+        val = float(value)
+        if val < 1:
+            msg = _("The default size of the data disk must be greater or equal to one GB.")
+            raise ValueError(msg)
+        self._disk_size = val
+
+    # -----------------------------------------------------------
+    @property
+    def disk_min_size(self):
+        """Minimal data disk size in GiB."""
+        return self._disk_min_size
+
+    @disk_min_size.setter
+    def disk_min_size(self, value):
+        if value is None:
+            msg = _("The minimal size of the data disk may not be None.")
+            raise TypeError(msg)
+        val = float(value)
+        if val < 1:
+            msg = _("The minimal size of the data disk must be greater or equal to one GB.")
+            raise ValueError(msg)
+        self._disk_min_size = val
+
+    # -----------------------------------------------------------
+    @property
+    def disk_max_size(self):
+        """Maximal data disk size in GiB."""
+        return self._disk_max_size
+
+    @disk_max_size.setter
+    def disk_max_size(self, value):
+        if value is None:
+            msg = _("The maximal size of the data disk may not be None.")
+            raise TypeError(msg)
+        val = float(value)
+        if val < 1:
+            msg = _("The maximal size of the data disk must be greater or equal to one GB.")
+            raise ValueError(msg)
+        self._disk_max_size = val
+
+    # -----------------------------------------------------------
+    @property
+    def root_min_size(self):
+        """Minimal root disk size in GiB."""
+        return self._root_min_size
+
+    @root_min_size.setter
+    def root_min_size(self, value):
+        if value is None:
+            msg = _("The minimal size of the root disk may not be None.")
+            raise TypeError(msg)
+        val = float(value)
+        if val < 1:
+            msg = _("The minimal size of the root disk must be greater or equal to one GB.")
+            raise ValueError(msg)
+        self._root_min_size = val
+
+    # -----------------------------------------------------------
+    @property
+    def root_max_size(self):
+        """Maximal root disk size in GiB."""
+        return self._root_max_size
+
+    @root_max_size.setter
+    def root_max_size(self, value):
+        if value is None:
+            msg = _("The maximal size of the root disk may not be None.")
+            raise TypeError(msg)
+        val = float(value)
+        if val < 1:
+            msg = _("The maximal size of the root disk must be greater or equal to one GB.")
+            raise ValueError(msg)
+        self._root_max_size = val
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True, show_secrets=False):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(CrTfConfiguration, self).as_dict(short=short)
+
+        res['simulate'] = self.simulate
+        res['pdns_api_use_https'] = self.pdns_api_use_https
+        res['pdns_api_timeout'] = self.pdns_api_timeout
+        res['vsphere_password'] = None
+        res['vm_root_password'] = None
+        res['pdns_api_key'] = None
+        res['relative_tf_dir'] = self.relative_tf_dir
+        res['terraform_dir'] = self.terraform_dir
+        res['disk_size'] = self.disk_size
+        res['disk_min_size'] = self.disk_min_size
+        res['disk_max_size'] = self.disk_max_size
+        res['root_min_size'] = self.root_min_size
+        res['root_max_size'] = self.root_max_size
+
+        if self.vsphere_password:
+            if show_secrets or self.verbose > 4:
+                res['vsphere_password'] = self.vsphere_password
+            else:
+                res['vsphere_password'] = '*******'
+
+        if self.pdns_api_key:
+            if show_secrets or self.verbose > 4:
+                res['pdns_api_key'] = self.pdns_api_key
+            else:
+                res['pdns_api_key'] = '*******'
+
+        if self.vm_root_password:
+            if show_secrets or self.verbose > 4:
+                res['vm_root_password'] = self.vm_root_password
+            else:
+                res['vm_root_password'] = '*******'
+
+        return res
+
+    # -------------------------------------------------------------------------
+    def eval_config_section(self, config, section_name):
+        """Evaluating of all found configuration options."""
+
+        super(CrTfConfiguration, self).eval_config_section(config, section_name)
+
+        if section_name.lower() == 'vsphere':
+            self.eval_config_vsphere(config, section_name)
+        elif section_name.lower() == 'powerdns' or section_name.lower() == 'pdns':
+            self.eval_config_pdns(config, section_name)
+        elif section_name.lower() == 'terraform':
+            self.eval_config_terraform(config, section_name)
+
+    # -------------------------------------------------------------------------
+    def eval_config_global(self, config, section_name):
+        """Evaluating section [global] of configuration.
+            May be overridden in descendant classes."""
+
+        super(CrTfConfiguration, self).eval_config_global(
+            config=config, section_name=section_name)
+
+        re_tz = re.compile(r'^\s*(?:tz|time[_-]?zone)\s*$', re.IGNORECASE)
+        re_puppetmaster = re.compile(r'^\s*puppet[_-]?master\s*$', re.IGNORECASE)
+        re_puppetca = re.compile(r'^\s*puppet[_-]?ca\s*$', re.IGNORECASE)
+
+        for (key, value) in config.items(section_name):
+            if key.lower() == 'simulate':
+                self.simulate = value
+            elif re_tz.search(key) and value.strip():
+                val = value.strip()
+                try:
+                    tz = pytz.timezone(val)         # noqa
+                except pytz.exceptions.UnknownTimeZoneError as e:
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section_name, v=value, n='time_zone', e=e))
+                self.tz_name = value.strip()
+            elif re_puppetmaster.search(key) and value.strip():
+                val = value.strip()
+                if not RE_FQDN.search(val):
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section_name, v=value, n='puppet_master',
+                        e='Invalid Host FQDN for puppetmaster'))
+                self.puppetmaster = val.lower()
+            elif re_puppetca.search(key) and value.strip():
+                val = value.strip()
+                if not RE_FQDN.search(val):
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section_name, v=value, n='puppet_ca',
+                        e='Invalid Host FQDN for puppetca'))
+                self.puppetca = val.lower()
+
+    # -------------------------------------------------------------------------
+    def eval_config_vsphere(self, config, section):
+
+        if self.verbose > 2:
+            LOG.debug(_("Checking config section {!r} ...").format(section))
+
+        re_excl_ds = re.compile(r'^\s*excluded?[-_]datastores?\s*$', re.IGNORECASE)
+        re_split_ds = re.compile(r'[,;\s]+')
+        re_template = re.compile(r'^\s*template(?:[-_\.]?name)?\s*$', re.IGNORECASE)
+        re_min_root_size = re.compile(
+            r'^\s*min[-_\.]?root[-_\.]?size(?:[-_\.]?gb)\s*$', re.IGNORECASE)
+        re_guest_id = re.compile(r'^\s*guest[-_]?id\s*$', re.IGNORECASE)
+
+        for (key, value) in config.items(section):
+
+            if key.lower() == 'host' and value.strip():
+                self.vsphere_host = value.strip().lower()
+            elif key.lower() == 'port':
+                val = 0
+                try:
+                    val = int(value)
+                except ValueError as e:
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section, v=value, n='port', e=e))
+                if val < 0:
+                    raise ConfigError(self.msg_val_negative.format(
+                        f=self.config_file, s=section, v=value, n='port'))
+                self.vsphere_port = val
+            elif key.lower() == 'user' and value.strip():
+                self.vsphere_user = value.strip()
+            elif key.lower() == 'password':
+                self.vsphere_password = value
+            elif key.lower() == 'dc' and value.strip():
+                self.vsphere_dc = value.strip()
+            elif key.lower() == 'cluster' and value.strip():
+                self.vsphere_cluster = value.strip()
+            elif re_template.search(key) and value.strip():
+                self.template_name = value.strip()
+            elif re_excl_ds.search(key) and value.strip():
+                datastores = re_split_ds.split(value.strip())
+                self.excluded_datastores = datastores
+            elif re_min_root_size.search(key) and value.strip():
+                val = 0.0
+                try:
+                    val = float(value.strip())
+                except ValueError as e:
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section, v=value, n=key, e=e))
+                if val < 0:
+                    raise ConfigError(self.msg_val_negative.format(
+                        f=self.config_file, s=section, v=value, n=key))
+                self.min_root_size_gb = val
+            elif re_guest_id.search(key) and value.strip():
+                self.guest_id = value.strip()
+
+        return
+
+    # -------------------------------------------------------------------------
+    def eval_config_pdns(self, config, section):
+
+        if self.verbose > 2:
+            LOG.debug(_("Checking config section {!r} ...").format(section))
+
+        re_master = re.compile(
+            r'^\s*(?:master(?:[-_\.]?server)?|api(?:[-_\.]?(?:host|server)))\s*$', re.IGNORECASE)
+        re_port = re.compile(r'^\s*(?:api[-_\.]?)?port\s*$', re.IGNORECASE)
+        re_key = re.compile(r'^\s*(?:api[-_\.]?)?key\s*$', re.IGNORECASE)
+        re_use_https = re.compile(r'^\s*(?:api[-_\.]?)?(?:use[-_\.]?)?https\s*$', re.IGNORECASE)
+        re_prefix = re.compile(r'^\s*(?:api[-_\.]?)?(?:path[-_\.]?)?prefix\s*$', re.IGNORECASE)
+        re_comment_account = re.compile(r'^\s*comment[-_\.]?account\s*$', re.IGNORECASE)
+
+        for (key, value) in config.items(section):
+            if re_master.search(key) and value.strip():
+                self.pdns_master_server = value.strip().lower()
+            elif re_port.search(key) and value.strip():
+                val = 0
+                try:
+                    val = int(value.strip())
+                except ValueError as e:
+                    raise ConfigError(self.msg_invalid_type.format(
+                        f=self.config_file, s=section, v=value, n=key, e=e))
+                if val < 0:
+                    raise ConfigError(self.msg_val_negative.format(
+                        f=self.config_file, s=section, v=value, n=key))
+                self.pdns_api_port = val
+            elif re_key.search(key) and value.strip():
+                self.pdns_api_key = value.strip()
+            elif re_use_https.search(key):
+                self.pdns_api_use_https = value
+            elif re_prefix.search(key) and value.strip():
+                self.pdns_api_path_prefix = value.strip()
+            elif key.lower() == 'timeout' and value.strip():
+                self.pdns_api_timeout = value.strip()
+            elif re_comment_account.search(key) and value.strip():
+                self.pdns_comment_account = value.strip()
+
+        return
+
+    # -------------------------------------------------------------------------
+    def eval_config_terraform(self, config, section):
+
+        if self.verbose > 2:
+            LOG.debug(_("Checking config section {!r} ...").format(section))
+
+        re_dir = re.compile(r'^\s*dir(?:ectory)?\s*$', re.IGNORECASE)
+        re_root_pw = re.compile(r'^\s*root[_-]?passw(?:ord)?\s*$', re.IGNORECASE)
+
+        re_disk_size = re.compile(r'^\s*(?:data[_-]?)?disk[_-]?size\s*$', re.IGNORECASE)
+
+        re_disk_min_size = re.compile(
+            r'^\s*(?:data[_-]?)?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
+        re_disk_max_size = re.compile(
+            r'^\s*(?:data[_-]?)?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
+        re_root_disk_min_size = re.compile(
+            r'^\s*root[_-]?disk[_-]?min[_-]?size\s*$', re.IGNORECASE)
+        re_root_disk_max_size = re.compile(
+            r'^\s*root[_-]?disk[_-]?max[_-]?size\s*$', re.IGNORECASE)
+        re_relative_tf_dir = re.compile(
+            r'^\s*(?:use[_-]?)?relative[_-]?(?:tf|terraform)?[_-]?dir(?:ectory)?\s*$',
+            re.IGNORECASE)
+
+        re_backend_host = re.compile(r'^\s*backend[_-]?host\s*$', re.IGNORECASE)
+        re_backend_scheme = re.compile(r'^\s*backend[_-]?scheme\s*$', re.IGNORECASE)
+        re_backend_path_prefix = re.compile(r'^\s*backend[_-]?path[_-]?prefix\s*$', re.IGNORECASE)
+
+        for (key, value) in config.items(section):
+            if re_dir.search(key) and value.strip():
+                self.terraform_dir = value.strip()
+            elif re_relative_tf_dir.search(key):
+                if value is not None and str(value).strip() != '':
+                    self.relative_tf_dir = value
+                # else: default is True
+            elif re_root_pw.search(key) and value.strip():
+                self.vm_root_password = value
+            elif re_disk_size.search(key):
+                self.disk_size = value
+            elif re_disk_min_size.search(key):
+                self.disk_min_size = value
+            elif re_disk_max_size.search(key):
+                self.disk_max_size = value
+            elif re_root_disk_min_size.search(key):
+                self.root_min_size = value
+            elif re_root_disk_max_size.search(key):
+                self.root_max_size = value
+            elif re_backend_host.search(key) and value.strip():
+                self.tf_backend_host = value.strip().lower()
+            elif re_backend_scheme.search(key) and value.strip():
+                self.tf_backend_scheme = value.strip().lower()
+            elif re_backend_path_prefix.search(key) and value.strip():
+                self.tf_backend_path_prefix = value.strip()
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
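
A rough usage sketch for this configuration class, not part of the commit: the INI section and option names follow the eval_config_*() methods above, while the concrete values and the temporary file handling are invented for illustration. It also assumes that the cr_tf package and its fb_tools dependency are importable and that the fb_tools base class dispatches the [global] section to eval_config_global().

import tempfile
from pathlib import Path

from cr_tf.config import CrTfConfiguration

EXAMPLE_INI = """
[global]
simulate = no
time_zone = Europe/Berlin

[vsphere]
host = vcs01.example.com
cluster = cluster01
template_name = oracle-linux-7-template

[powerdns]
master_server = dns-master.example.com
api_port = 8081
use_https = yes

[terraform]
backend_host = terraform.example.com
disk_size = 20
"""

with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as fh:
    fh.write(EXAMPLE_INI)
    cfg_file = Path(fh.name)

config = CrTfConfiguration(appname='create-terraform', config_file=cfg_file)
config.read()
print(config.vsphere_host, config.pdns_master_server, config.disk_size)
cfg_file.unlink()
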
diff --git a/lib/cr_tf/errors.py b/lib/cr_tf/errors.py
new file mode 100644 (file)
index 0000000..5fe1cf5
--- /dev/null
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@summary: module for some common used error classes
+"""
+from __future__ import absolute_import
+
+# Standard modules
+
+
+# Own modules
+from fb_tools.errors import FbHandlerError, ExpectedHandlerError
+
+from .xlate import XLATOR
+
+__version__ = '1.0.1'
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformObjectError(FbHandlerError):
+    """Exception class on errors evaluation VM definition for terraform."""
+
+    pass
+
+
+# =============================================================================
+class TerraformVmError(TerraformObjectError):
+    """Exception class on errors evaluation VM definition for terraform."""
+
+    pass
+
+
+# =============================================================================
+class TerraformVmDefinitionError(TerraformVmError):
+    """Exception class on errors evaluation VM definition for terraform."""
+
+    pass
+
+
+# =============================================================================
+class NetworkNotExistingError(ExpectedHandlerError):
+    """Special error class for the case, if the expected network is not existing."""
+
+    # -------------------------------------------------------------------------
+    def __init__(self, net_name):
+
+        self.net_name = net_name
+
+    # -------------------------------------------------------------------------
+    def __str__(self):
+
+        msg = _("The network {!r} is not existing.").format(self.net_name)
+        return msg
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
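
A small usage sketch for the error classes above, not part of the commit and assuming the cr_tf package and its fb_tools dependency are importable: NetworkNotExistingError only stores the network name and builds its message lazily in __str__(); because it derives from ExpectedHandlerError, app.py catches it in _run() and turns it into a clean exit instead of a traceback. The network name below is hypothetical.

from cr_tf.errors import NetworkNotExistingError

try:
    raise NetworkNotExistingError('backend-net-042')
except NetworkNotExistingError as e:
    print(e)        # -> The network 'backend-net-042' does not exist.
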
diff --git a/lib/cr_tf/handler.py b/lib/cr_tf/handler.py
new file mode 100644 (file)
index 0000000..11bdd3f
--- /dev/null
@@ -0,0 +1,2544 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2019 by Frank Brehm, Berlin
+@summary: A handler module for underlying actions
+"""
+from __future__ import absolute_import, print_function
+
+# Standard module
+import os
+import logging
+import re
+import socket
+import ipaddress
+import shutil
+import glob
+import stat
+import textwrap
+import copy
+import sys
+
+from pathlib import Path
+
+from subprocess import PIPE
+
+from distutils.version import LooseVersion
+
+# Third party modules
+import pytz
+import yaml
+import six
+
+# Own modules
+from fb_tools.common import pp, to_bool, RE_DOT_AT_END
+
+from fb_tools.errors import HandlerError, ExpectedHandlerError, CommandNotFoundError
+
+from fb_tools.handling_obj import HandlingObject
+
+from fb_tools.handler import BaseHandler
+
+from fb_tools.vsphere.server import VsphereServer
+
+from fb_tools.vsphere.errors import VSphereExpectedError
+
+from fb_tools.pdns.server import PowerDNSServer
+from fb_tools.pdns.errors import PowerDNSHandlerError
+
+from . import MIN_VERSION_TERRAFORM, MAX_VERSION_TERRAFORM
+from . import MIN_VERSION_VSPHERE_PROVIDER
+
+from .config import CrTfConfiguration
+
+from .terraform.vm import TerraformVm
+
+from .terraform.disk import TerraformDisk
+
+from .xlate import XLATOR
+
+__version__ = '2.8.8'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class AbortExecution(ExpectedHandlerError):
+    """Indicating an abort of the execution."""
+
+    # -------------------------------------------------------------------------
+    def __init__(self, step=None):
+
+        if step:
+            self.step = step
+        else:
+            self.step = _('<some unknown step>')
+
+    # -------------------------------------------------------------------------
+    def __str__(self):
+
+        return _("Aborting after {!r}.").format(self.step)
+
+
+# =============================================================================
+class CreateTerraformHandler(BaseHandler):
+    """
+    A handler class for creating the terraform environment
+    """
+
+    re_default = re.compile(r'^\s*defaults?\s*$', re.IGNORECASE)
+    re_vm_key = re.compile(r'^\s*vms?\s*$', re.IGNORECASE)
+    re_group = re.compile(r'^\s*groups?\s*$', re.IGNORECASE)
+    re_group_name = re.compile(r'^\s*name\s*$', re.IGNORECASE)
+    re_doublequote = re.compile(r'"')
+    re_vm_path = re.compile(r'^\s*\[\s*([^\s\]]+)')
+
+    re_tf_version = re.compile(r'^\s*Terraform\s+v(\S+)', re.IGNORECASE)
+
+    std_file_permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+    std_secure_file_permissions = stat.S_IRUSR | stat.S_IWUSR
+
+    open_opts = {}
+    if six.PY3:
+        open_opts['encoding'] = 'utf-8'
+        open_opts['errors'] = 'surrogateescape'
+
+    max_groups_depth = 10
+
+    tz_name = 'Europe/Berlin'
+    tz = pytz.timezone(tz_name)
+
+    steps = (
+        'init', 'read-yaml', 'collect-folders', 'pdns-zones', 'vmw-clusters',
+        'vmw-datastores', 'vmw-ds-clusters', 'vmw-networks', 'vmw-templates',
+        'validate-yaml', 'validate-storage', 'validate-iface', 'validate-dns',
+        'perform-dns', 'project-dir', 'tf-files', 'ensure-vmw-folders',
+    )
+    step_desc = {
+        'init': _('After initialization of all objects and handlers.'),
+        'read-yaml': _('After reading the given YAML file.'),
+        'collect-folders': _('After collecting all VMWare and local folders.'),
+        'pdns-zones': _('After retrieving all DNS zones from PowerDNS.'),
+        'vmw-clusters': _('After collecting all VMWare clusters.'),
+        'vmw-datastores': _('After collecting all VMWare datastores.'),
+        'vmw-ds-clusters': _('After collecting all VMWare datastore clusters.'),
+        'vmw-networks': _('After collecting all VMWare networks.'),
+        'vmw-templates': _('After validating all given VMWare templates.'),
+        'validate-yaml': _('After syntax validating of data from loaded YAML file.'),
+        'validate-storage': _('After validating all given storage data.'),
+        'validate-iface': _('After validating all given network interface data.'),
+        'validate-dns': _('After validating all given DNS data.'),
+        'perform-dns': _('After performing all necessary actions in DNS.'),
+        'project-dir': _('After ensuring availability of the project directory.'),
+        'tf-files': _('After creation of the Terraform project files.'),
+        'ensure-vmw-folders': _('After ensuring availability of VM folders in VMWare vSphere.'),
+    }
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None,
+            config=None, simulate=False, force=False, ignore_existing_dns=False,
+            terminal_has_colors=False, initialized=False):
+
+        self.pdns = None
+        self.vsphere = None
+        self.config = None
+
+        self.terraform_cmd = None
+
+        self.yaml_data = None
+
+        self.default_vm = None
+        self.group_default_vms = {}
+
+        self.ignore_existing_dns = bool(ignore_existing_dns)
+
+        self.vms = []
+        self.vsphere_templates = {}
+
+        self.vm_names = []
+        self.fqdns = {}
+        self.addresses = {}
+
+        self.vsphere_folders = []
+
+        self.used_networks = []
+        self.used_dc_clusters = []
+        self.used_datastores = []
+        self.project_dir = None
+        self.project_name = None
+
+        self.existing_vms = []
+
+        self.start_dir = Path(os.getcwd())
+
+        self.script_dir = None
+        self.script_dir_rel = None
+
+        self._stop_at_step = None
+
+        self.min_version_terraform = None
+        if MIN_VERSION_TERRAFORM:
+            self.min_version_terraform = LooseVersion(MIN_VERSION_TERRAFORM)
+
+        self.max_version_terraform = None
+        if MAX_VERSION_TERRAFORM:
+            self.max_version_terraform = LooseVersion(MAX_VERSION_TERRAFORM)
+
+        self.min_version_vsphere_provider = None
+        if MIN_VERSION_VSPHERE_PROVIDER:
+            self.min_version_vsphere_provider = LooseVersion(MIN_VERSION_VSPHERE_PROVIDER)
+
+        self.dns_mapping = {
+            'forward': [],
+            'reverse': [],
+        }
+        self.dns_mappings2create = {
+            'forward': [],
+            'reverse': [],
+        }
+
+        self.updated_zones = []
+
+        self.eval_errors = 0
+
+        super(CreateTerraformHandler, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
+            initialized=False,
+        )
+
+        if config:
+            self.config = config
+
+        self.script_dir = self.base_dir.joinpath('postinstall-scripts')
+
+        if initialized:
+            self.initialized = True
+
+    # -----------------------------------------------------------
+    @HandlingObject.simulate.setter
+    def simulate(self, value):
+        self._simulate = to_bool(value)
+
+        if self.initialized:
+            LOG.debug(_("Setting simulate of all subsequent objects to {!r} ...").format(
+                self.simulate))
+
+        if self.pdns:
+            self.pdns.simulate = self.simulate
+
+        if self.vsphere:
+            self.vsphere.simulate = self.simulate
+
+    # -----------------------------------------------------------
+    @property
+    def stop_at_step(self):
+        """Step, at which the execution should be interrupted."""
+        return self._stop_at_step
+
+    @stop_at_step.setter
+    def stop_at_step(self, value):
+        if value is None:
+            self._stop_at_step = None
+            return
+        v = str(value).strip().lower().replace('_', '-')
+        if v == '':
+            self._stop_at_step = None
+            return
+        if v not in self.steps:
+            msg = _("Invalid step name {!r} for interrupting execution.").format(value)
+            raise ValueError(msg)
+        self._stop_at_step = v
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(CreateTerraformHandler, self).as_dict(short=short)
+        res['std_file_permissions'] = "{:04o}".format(self.std_file_permissions)
+        res['std_secure_file_permissions'] = "{:04o}".format(self.std_secure_file_permissions)
+        res['open_opts'] = self.open_opts
+        res['stop_at_step'] = self.stop_at_step
+        res['steps'] = copy.copy(self.steps)
+        res['tz_name'] = self.tz_name
+
+        return res
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def set_tz(cls, tz_name):
+
+        if not tz_name.strip():
+            raise ValueError(_("Invalid time zone name {!r}.").format(tz_name))
+        tz_name = tz_name.strip()
+        LOG.debug(_("Setting time zone to {!r}.").format(tz_name))
+        cls.tz = pytz.timezone(tz_name)
+        cls.tz_name = tz_name
+
+    # -------------------------------------------------------------------------
+    def incr_verbosity(self, diff=1):
+
+        new_verbose = self.verbose + int(diff)
+        if new_verbose < 0:
+            new_verbose = 0
+        self.verbose = new_verbose
+
+        if self.pdns:
+            self.pdns.verbose = self.verbose
+
+        if self.vsphere:
+            self.vsphere.verbose = self.verbose
+
+    # -------------------------------------------------------------------------
+    def init_handlers(self):
+
+        if not self.config:
+            msg = _("No configuration given before initialisation of handlers.")
+            raise HandlerError(msg)
+
+        if not isinstance(self.config, CrTfConfiguration):
+            raise HandlerError(_(
+                "{n} is not a {e}-instance, but a {w}-instance instead.").format(
+                n='self.config', e='CrTfConfiguration', w=self.config.__class__.__name__))
+
+        TerraformDisk.default_size = self.config.disk_size
+        TerraformDisk.min_size_gb = self.config.disk_min_size
+        TerraformDisk.max_size_gb = self.config.disk_max_size
+
+        TerraformVm.min_rootdisk_size = self.config.root_min_size
+        TerraformVm.max_rootdisk_size = self.config.root_max_size
+
+        LOG.info(_("Initialize some additional handlers."))
+
+        self.terraform_cmd = self.get_command('terraform', quiet=True)
+        if not self.terraform_cmd:
+            raise CommandNotFoundError('terraform')
+        self.check_terraform_version()
+
+        self.pdns = PowerDNSServer(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            master_server=self.config.pdns_master_server,
+            port=self.config.pdns_api_port, key=self.config.pdns_api_key,
+            use_https=self.config.pdns_api_use_https, path_prefix=self.config.pdns_api_path_prefix,
+            simulate=self.simulate, force=self.force, initialized=True,
+        )
+
+        try:
+            api_version = self.pdns.get_api_server_version()            # noqa
+        except PowerDNSHandlerError as e:
+            raise ExpectedHandlerError(str(e))
+
+        try:
+            self.vsphere = VsphereServer(
+                appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+                host=self.config.vsphere_host, port=self.config.vsphere_port,
+                user=self.config.vsphere_user, password=self.config.vsphere_password,
+                dc=self.config.vsphere_dc, simulate=self.simulate, force=self.force,
+                terminal_has_colors=self.terminal_has_colors, initialized=True,
+            )
+
+            self.vsphere.get_about()
+        except VSphereExpectedError as e:
+            raise ExpectedHandlerError(str(e))
+
+    # -------------------------------------------------------------------------
+    def check_terraform_version(self):
+        """ Checking, that the called terraform has a minimum version."""
+
+        tf_timeout = 10
+
+        got_tf_version = None
+        LOG.info(_("Checking the terraform version ..."))
+
+        cmd = [str(self.terraform_cmd), 'version']
+        cmd_str = ' '.join(cmd)
+        LOG.debug(_("Executing {!r} ...").format(cmd_str))
+        result = self.run(
+            cmd, may_simulate=False, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+        LOG.debug(_("Completed process:") + "\n" + str(result))
+
+        if not result.stdout:
+            msg = _("No output on command {!r}.").format(cmd_str)
+            raise ExpectedHandlerError(msg)
+        lines = result.stdout.splitlines()
+
+        if self.verbose > 2:
+            LOG.debug(_("First line:") + '\n' + lines[0])
+        match = self.re_tf_version.search(lines[0])
+        if not match:
+            msg = _("Could not evaluate version output of terraform:") + '\n' + result.stdout
+            raise ExpectedHandlerError(msg)
+
+        got_tf_version = LooseVersion(match.group(1))
+        LOG.info(_("Terraform version: {!r}.").format(str(got_tf_version)))
+
+        if self.min_version_terraform:
+            LOG.debug(_("Checking for {o}{m!r} ...").format(
+                o='>=', m=str(self.min_version_terraform)))
+            if got_tf_version < self.min_version_terraform:
+                msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
+                    c=str(got_tf_version), o='>=', m=str(self.min_version_terraform))
+                raise ExpectedHandlerError(msg)
+
+        if self.max_version_terraform:
+            LOG.debug(_("Checking for {o}{m!r} ...").format(
+                o='<=', m=str(self.max_version_terraform)))
+            if got_tf_version > self.max_version_terraform:
+                msg = _("Invalid version {c!r} of terraform, expected {o}{m!r}.").format(
+                    c=str(got_tf_version), o='<=', m=str(self.max_version_terraform))
+                raise ExpectedHandlerError(msg)
+
+    # -------------------------------------------------------------------------
+    def __del__(self):
+        """Destructor."""
+
+        LOG.debug(_("Self destruction."))
+
+        if self.pdns:
+            self.pdns = None
+
+        if self.vsphere:
+            self.vsphere = None
+
+    # -------------------------------------------------------------------------
+    def __call__(self, yaml_file):
+        """Executing the underlying action."""
+
+        if not self.initialized:
+            raise HandlerError(_("{}-object not initialized.").format(self.__class__.__name__))
+
+        try:
+
+            self.exec_init_run()
+
+            LOG.info(_("Go ahead..."))
+
+            self.exec_read_yaml(yaml_file)
+
+            if self.simulate:
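+                # Print a colored banner pointing out what is skipped in simulation mode.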
+                print()
+                msg_a = _("Simulation mode")
+                msg_b = (
+                    "* " + _("Necessary DNS records are not created."),
+                    "* " + _("Terraform files are not created.")
+                )
+                ll = 4
+                if len(msg_a) > ll:
+                    ll = len(msg_a)
+                for msg in msg_b:
+                    if len(msg) > ll:
+                        ll = len(msg)
+
+                print(self.colored('#' * (ll + 4), 'AQUA'))
+                line = self.colored('#', 'AQUA') + ' '
+                line += self.colored(msg_a.center(ll), 'YELLOW')
+                line += ' ' + self.colored('#', 'AQUA')
+                print(line)
+                for msg in msg_b:
+                    line = '# ' + msg.ljust(ll) + ' #'
+                    print(self.colored(line, 'AQUA'))
+                print(self.colored('#' * (ll + 4), 'AQUA'))
+                print()
+
+            self.exec_collect_folders(yaml_file)
+            self.exec_pdns_zones()
+
+            print()
+            LOG.info(_("Retrieving information from vSphere."))
+
+            self.exec_vmw_clusters()
+            self.exec_vmw_datastores()
+            self.exec_vmw_ds_clusters()
+            self.exec_vmw_networks()
+            self.exec_vmw_templates()
+
+            self.exec_validate_yaml()
+            self.exec_validate_storage()
+            self.exec_validate_iface()
+            self.exec_validate_dns()
+
+            if self.verbose > 2:
+
+                vm_list = []
+                for vm in self.vms:
+                    vm_list.append(vm.as_dict())
+                LOG.debug(_("Validated VMs:") + "\n" + pp(vm_list))
+
+            if self.existing_vms:
+                msg = ngettext(
+                    "There is one existing virtual machine.",
+                    "There are {c} existing virtual machines.",
+                    len(self.existing_vms)).format(c=len(self.existing_vms))
+                LOG.warn(msg)
+                if self.verbose > 2:
+                    msg = ngettext(
+                        "Existing virtual machine:", "Existing virtual machines:",
+                        len(self.existing_vms))
+                    LOG.debug(msg + '\n' + pp(self.existing_vms))
+            else:
+                LOG.info(_("No existing virtual machines found in YAML file."))
+
+            self.exec_perform_dns()
+            self.exec_project_dir()
+
+            self.exec_tf_files()
+            self.exec_vsphere_folders()
+
+            LOG.info(_("Finished all steps."))
+
+        except AbortExecution as e:
+            LOG.warn(str(e))
+            return
+
+        self.exec_terraform()
+        if self.simulate:
+            print()
+            print(self.colored(
+                _('As I said before - it was only a simulation!'), 'AQUA'))
+
+        print()
+
+    # -------------------------------------------------------------------------·
+    def exec_init_run(self):
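+        """
+        Execute the step 'init'.
+
+        Like all exec_*() step methods it raises the verbosity when it is the
+        configured stop step and finally aborts the run via AbortExecution.
+        """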
+
+        if self.stop_at_step == 'init':
+            self.incr_verbosity()
+
+        if self.verbose > 2:
+            LOG.debug(_("Current {} object:").format(self.__class__.__name__) + "\n" + str(self))
+
+        LOG.info(_("Finished step {!r}.").format('init'))
+        if self.stop_at_step == 'init':
+            raise AbortExecution('init')
+
+    # -------------------------------------------------------------------------·
+    def exec_read_yaml(self, yaml_file):
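+        """Step 'read-yaml': read the given YAML file and evaluate its VM definitions."""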
+
+        if self.stop_at_step == 'read-yaml':
+            self.incr_verbosity()
+
+        self.read_yaml_data(yaml_file)
+        self.eval_yaml_data()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in evaluation of YAML data of {f!r}.",
+                "Found {n} errors in evaluation of YAML data of {f!r}.",
+                self.eval_errors).format(n=self.eval_errors, f=str(yaml_file))
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('read-yaml'))
+        if self.stop_at_step == 'read-yaml':
+            raise AbortExecution('read-yaml')
+
+    # -------------------------------------------------------------------------·
+    def exec_collect_folders(self, yaml_file):
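+        """
+        Step 'collect-folders': collect the vSphere folders of all VMs and
+        derive the project name and directory from the YAML file name.
+        """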
+
+        if self.stop_at_step == 'collect-folders':
+            self.incr_verbosity()
+
+        LOG.info(_("Collecting all VMWare and local folders ..."))
+        LOG.info(_("Get vSphere datacenter ..."))
+        self.vsphere.get_datacenter()
+
+        LOG.debug(_("Collecting vSphere folders."))
+        self.vsphere_folders = []
+        for vm in self.vms:
+            if vm.folder:
+                if vm.folder not in self.vsphere_folders:
+                    self.vsphere_folders.append(vm.folder)
+        self.vsphere_folders.sort(key=str.lower)
+        LOG.debug(_("Collected vSphere folders:") + "\n" + pp(self.vsphere_folders))
+
+        # Set project name and directory
+        yfile = Path(yaml_file)
+        yfile_base = yfile.name
+        yfile_dir = yfile.parent.resolve()
+        (yfile_stem, yfile_ext) = os.path.splitext(yfile_base)
+        self.project_name = yfile_stem
+        LOG.info(_("Project name is {!r}.").format(str(self.project_name)))
+        if self.config.relative_tf_dir:
+            LOG.debug(_("Using a relative project directory path ..."))
+            self.project_dir = yfile_dir / yfile_stem
+        else:
+            self.project_dir = self.config.terraform_dir / yfile_stem
+        LOG.info(_("Project directory is: {!r}.").format(str(self.project_dir)))
+
+        LOG.info(_("Finished step {!r}.").format('collect-folders'))
+        if self.stop_at_step == 'collect-folders':
+            raise AbortExecution('collect-folders')
+
+    # -------------------------------------------------------------------------·
+    def exec_pdns_zones(self):
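+        """Step 'pdns-zones': retrieve all zones from the PowerDNS API."""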
+
+        if self.stop_at_step == 'pdns-zones':
+            self.incr_verbosity()
+
+        print()
+        LOG.info(_("Retrieving informations from PowerDNS ..."))
+
+        self.pdns.get_api_zones()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in exploring PowerDNS zones.",
+                "Found {n} errors in exploring PowerDNS zones.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('pdns-zones'))
+        if self.stop_at_step == 'pdns-zones':
+            raise AbortExecution('pdns-zones')
+
+    # -------------------------------------------------------------------------·
+    def exec_vmw_clusters(self):
+
+        if self.stop_at_step == 'vmw-clusters':
+            self.incr_verbosity()
+
+        self.vsphere.get_clusters()
+
+        LOG.info(_("Finished step {!r}.").format('vmw-clusters'))
+        if self.stop_at_step == 'vmw-clusters':
+            raise AbortExecution('vmw-clusters')
+
+    # -------------------------------------------------------------------------·
+    def exec_vmw_datastores(self):
+
+        if self.stop_at_step == 'vmw-datastores':
+            self.incr_verbosity()
+
+        self.vsphere.get_datastores()
+
+        LOG.info(_("Finished step {!r}.").format('vmw-datastores'))
+        if self.stop_at_step == 'vmw-datastores':
+            raise AbortExecution('vmw-datastores')
+
+    # -------------------------------------------------------------------------·
+    def exec_vmw_ds_clusters(self):
+
+        if self.stop_at_step == 'vmw-ds-clusters':
+            self.incr_verbosity()
+
+        self.vsphere.get_ds_clusters()
+
+        LOG.info(_("Finished step {!r}.").format('vmw-ds-clusters'))
+        if self.stop_at_step == 'vmw-ds-clusters':
+            raise AbortExecution('vmw-ds-clusters')
+
+    # -------------------------------------------------------------------------·
+    def exec_vmw_networks(self):
+
+        if self.stop_at_step == 'vmw-networks':
+            self.incr_verbosity()
+
+        self.vsphere.get_networks()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in exploring vSphere resources.",
+                "Found {n} errors in exploring vSphere resources.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('vmw-networks'))
+        if self.stop_at_step == 'vmw-networks':
+            raise AbortExecution('vmw-networks')
+
+    # -------------------------------------------------------------------------·
+    def exec_vmw_templates(self):
+
+        if self.stop_at_step == 'vmw-templates':
+            self.incr_verbosity()
+
+        self.explore_vsphere_templates()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in exploring vSphere templates.",
+                "Found {n} errors in exploring vSphere templates.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('vmw-templates'))
+        if self.stop_at_step == 'vmw-templates':
+            raise AbortExecution('vmw-templates')
+
+    # -------------------------------------------------------------------------·
+    def exec_validate_yaml(self):
+
+        if self.stop_at_step == 'validate-yaml':
+            self.incr_verbosity()
+
+        print()
+        LOG.info(_("Validating information from YAML file ..."))
+
+        self.validate_clusters()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in validating vSphere computing clusters.",
+                "Found {n} errors in validating vSphere computing clusters.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        self.validate_vms()
+
+        LOG.info(_("Finished step {!r}.").format('validate-yaml'))
+        if self.stop_at_step == 'validate-yaml':
+            raise AbortExecution('validate-yaml')
+
+    # -------------------------------------------------------------------------·
+    def exec_validate_storage(self):
+
+        if self.stop_at_step == 'validate-storage':
+            self.incr_verbosity()
+
+        self.validate_storages()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in validating VM storages.",
+                "Found {n} errors in validating VM storages.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('validate-storage'))
+        if self.stop_at_step == 'validate-storage':
+            raise AbortExecution('validate-storage')
+
+    # -------------------------------------------------------------------------·
+    def exec_validate_iface(self):
+
+        if self.stop_at_step == 'validate-iface':
+            self.incr_verbosity()
+
+        self.validate_interfaces()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in validating VM interfaces.",
+                "Found {n} errors in validating VM interfaces.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('validate-iface'))
+        if self.stop_at_step == 'validate-iface':
+            raise AbortExecution('validate-iface')
+
+    # -------------------------------------------------------------------------·
+    def exec_validate_dns(self):
+
+        if self.stop_at_step == 'validate-dns':
+            self.incr_verbosity()
+
+        self.validate_dns_mappings()
+        if self.eval_errors:
+            msg = ngettext(
+                "Found one error in validating DNS mappings.",
+                "Found {n} errors in validating DNS mappings.",
+                self.eval_errors).format(n=self.eval_errors)
+            raise ExpectedHandlerError(msg)
+
+        LOG.info(_("Finished step {!r}.").format('validate-dns'))
+        if self.stop_at_step == 'validate-dns':
+            raise AbortExecution('validate-dns')
+
+    # -------------------------------------------------------------------------·
+    def exec_perform_dns(self):
+
+        if self.stop_at_step == 'perform-dns':
+            self.incr_verbosity()
+
+        self.perform_dns()
+
+        LOG.info(_("Finished step {!r}.").format('perform-dns'))
+        if self.stop_at_step == 'perform-dns':
+            raise AbortExecution('perform-dns')
+
+    # -------------------------------------------------------------------------·
+    def exec_project_dir(self):
+
+        if self.stop_at_step == 'project-dir':
+            self.incr_verbosity()
+
+        self.ensure_project_dir()
+        self.clean_project_dir()
+
+        LOG.info(_("Finished step {!r}.").format('project-dir'))
+        if self.stop_at_step == 'project-dir':
+            raise AbortExecution('project-dir')
+
+    # -------------------------------------------------------------------------·
+    def exec_tf_files(self):
+
+        if self.stop_at_step == 'tf-files':
+            self.incr_verbosity()
+
+        self.create_terraform_files()
+
+        LOG.info(_("Finished step {!r}.").format('tf-files'))
+        if self.stop_at_step == 'tf-files':
+            raise AbortExecution('tf-files')
+
+    # -------------------------------------------------------------------------·
+    def exec_vsphere_folders(self):
+
+        if self.stop_at_step == 'ensure-vmw-folders':
+            self.incr_verbosity()
+
+        self.ensure_vsphere_folders()
+
+        LOG.info(_("Finished step {!r}.").format('ensure-vmw-folders'))
+        if self.stop_at_step == 'ensure-vmw-folders':
+            raise AbortExecution('ensure-vmw-folders')
+
+    # -------------------------------------------------------------------------·
+    def read_yaml_data(self, yaml_file):
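+        """Read the YAML file into self.yaml_data and honour a top-level 'simulate' key."""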
+
+        LOG.info(_("Reading YAML file {!r} ...").format(str(yaml_file)))
+
+        open_opts = {}
+        if six.PY3 and self.config.encoding:
+            open_opts['encoding'] = self.config.encoding
+            open_opts['errors'] = 'surrogateescape'
+
+        try:
+            with open(str(yaml_file), 'r', **open_opts) as fh:
+                # safe_load avoids constructing arbitrary Python objects from YAML tags.
+                self.yaml_data = yaml.safe_load(fh)
+        except yaml.YAMLError as e:
+            msg = _("Error in YAML file {f!r}: {e}.").format(
+                f=str(yaml_file), e=e)
+            if hasattr(e, 'problem_mark'):
+                mark = e.problem_mark
+                msg += " " + _("Error position: {li}:{c}").format(
+                    li=mark.line + 1, c=mark.column + 1)
+            raise ExpectedHandlerError(msg)
+
+        if self.verbose > 2:
+            LOG.debug(_("Read data from YAML file:") + "\n" + pp(self.yaml_data))
+
+        if not isinstance(self.yaml_data, dict):
+            msg = _(
+                "Data read from YAML file {f!r} are not a dictionary, "
+                "but a {c} object instead.").format(
+                f=str(yaml_file), c=self.yaml_data.__class__.__name__)
+            raise ExpectedHandlerError(msg)
+
+        for key in self.yaml_data.keys():
+            if key.lower() == 'simulate':
+                self.simulate = to_bool(self.yaml_data[key])
+
+    # -------------------------------------------------------------------------·
+    def eval_yaml_data(self):
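+        """
+        Evaluate the read YAML data: first the default VM definition, then
+        all single VM definitions and finally the (possibly nested) VM groups.
+        """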
+
+        self.vm_names = []
+
+        # Searching for default VM definition
+        LOG.debug(_("Searching for default VM definition ..."))
+        for key in self.yaml_data.keys():
+
+            if self.re_default.match(key):
+                vm = self._eval_tpl_vm(name='Default VM', vm_def=self.yaml_data[key])
+                if vm:
+                    self.default_vm = vm
+
+        # Searching for VM definitions
+        LOG.debug(_("Searching for VM definitions ..."))
+        for key in self.yaml_data.keys():
+            if self.re_vm_key.match(key):
+                for vm_def in self.yaml_data[key]:
+                    vm = self._eval_vm(vm_def, template_vm=self.default_vm)
+                    if vm:
+                        self.vms.append(vm)
+
+        # Searching for groups
+        for key in self.yaml_data.keys():
+            if self.re_group.match(key):
+                self._eval_vm_groups(self.yaml_data[key], template_vm=self.default_vm, depth=1)
+
+        if self.verbose > 2:
+            vm_list = []
+            for vm in self.vms:
+                vm_list.append(vm.as_dict())
+            LOG.debug(_("Evaluated VMs:") + "\n" + pp(vm_list))
+
+    # -------------------------------------------------------------------------·
+    def _eval_tpl_vm(self, name, vm_def, template_vm=None):
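+        """Evaluate a template (default) VM definition, returning None on errors."""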
+
+        try:
+            vm = TerraformVm.from_def(
+                vm_def, name=name, is_template=True, template_vm=template_vm,
+                default_cluster=self.config.vsphere_cluster, appname=self.appname,
+                verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+                force=self.force, terminal_has_colors=self.terminal_has_colors)
+        except Exception as e:
+            if self.verbose > 2:
+                self.handle_error(str(e), e.__class__.__name__, True)
+            else:
+                LOG.error(_("{c} in evaluating template VM: {e}").format(
+                    c=e.__class__.__name__, e=e))
+            self.eval_errors += 1
+            return None
+
+        if self.verbose > 2:
+            LOG.debug(_(
+                "Defined Terraform Template VM {n!r}:").format(
+                n=vm.name) + "\n" + pp(vm.as_dict()))
+
+        return vm
+
+    # -------------------------------------------------------------------------·
+    def _eval_vm(self, vm_def, template_vm=None):
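+        """Evaluate a single VM definition, returning None on errors or duplicate names."""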
+
+        try:
+            vm = TerraformVm.from_def(
+                vm_def, is_template=False, template_vm=template_vm,
+                default_cluster=self.config.vsphere_cluster, appname=self.appname,
+                verbose=self.verbose, base_dir=self.base_dir, simulate=self.simulate,
+                force=self.force, terminal_has_colors=self.terminal_has_colors)
+        except Exception as e:
+            if self.verbose > 2:
+                self.handle_error(str(e), e.__class__.__name__, True)
+            else:
+                LOG.error(_("{c} in evaluating VM: {e}").format(c=e.__class__.__name__, e=e))
+            self.eval_errors += 1
+            return None
+
+        if self.verbose > 3:
+            LOG.debug(_(
+                "Defined Terraform-VM {n!r}:").format(n=vm.name) + "\n" + pp(vm.as_dict()))
+
+        if vm.name in self.vm_names:
+            LOG.error(_("VM {!r} is already defined.").format(vm.name))
+            self.eval_errors += 1
+            return None
+
+        return vm
+
+    # -------------------------------------------------------------------------·
+    def _eval_vm_groups(self, groups_def, template_vm=None, depth=1):
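+        """Evaluate a list of VM group definitions, limited by the maximum recursion depth."""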
+
+        if not isinstance(groups_def, list):
+            msg = _("Group definition list is not a list:") + "\n" + pp(groups_def)
+            LOG.error(msg)
+            self.eval_errors += 1
+            return
+
+        if depth >= self.max_groups_depth:
+            LOG.warn(_("Maximum recursion depth for VM groups of {} reached.").format(depth))
+            return
+
+        if self.verbose > 2:
+            LOG.debug(_("Evaluating group list:") + "\n" + pp(groups_def))
+        if self.verbose > 3:
+            LOG.debug(_("Used template: {!r}").format(template_vm))
+
+        for group_def in groups_def:
+            self._eval_vm_group(group_def, template_vm=template_vm, depth=depth)
+
+    # -------------------------------------------------------------------------·
+    def _eval_vm_group(self, group_def, template_vm=None, depth=1):
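+        """
+        Evaluate a single VM group: find its name, an optional group default
+        VM, its VM definitions and recurse into nested groups.
+        """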
+
+        if not isinstance(group_def, dict):
+            msg = _("VM definition is not a dictionary:") + "\n" + pp(group_def)
+            LOG.error(msg)
+            self.eval_errors += 1
+            return
+
+        group_template = template_vm
+        group_name = None
+
+        # Searching for the group name ...
+        for key in group_def.keys():
+            if self.re_group_name.match(key) and str(group_def[key]).strip():
+                group_name = str(group_def[key]).strip()
+
+        if not group_name:
+            LOG.error(_("No group name defined."))
+            return
+
+        # Searching for group default VM definition
+        LOG.debug(_("Searching for group default VM definition in group {!r} ...").format(
+            group_name))
+        for key in group_def.keys():
+
+            if self.re_default.match(key):
+                vm_name = 'Default VM group {!r}'.format(group_name)
+                vm = self._eval_tpl_vm(
+                    name=vm_name, vm_def=group_def[key], template_vm=template_vm)
+                if vm:
+                    group_template = vm
+                    break
+
+        n = None
+        if group_template:
+            n = group_template.name
+        LOG.debug(_("Used template for creating VMs in group {g!r}: {n!r}").format(
+            g=group_name, n=n))
+        if self.verbose > 3:
+            LOG.debug(_("Used template structure:") + "\n" + pp(group_template.as_dict()))
+
+        # Searching for VM definitions
+        LOG.debug(_("Searching for VM definitions in group {!r} ...").format(group_name))
+        for key in group_def.keys():
+            if self.re_vm_key.match(key):
+                for vm_def in group_def[key]:
+                    vm = self._eval_vm(vm_def, template_vm=group_template)
+                    if vm:
+                        self.vms.append(vm)
+
+        # Searching for nested groups
+        for key in group_def.keys():
+            if self.re_group.match(key):
+                self._eval_vm_groups(
+                    group_def[key], template_vm=group_template, depth=depth + 1)
+
+    # -------------------------------------------------------------------------·
+    def explore_vsphere_templates(self):
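+        """Look up all referenced template VMs in vSphere."""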
+
+        LOG.info(_("Exploring all vSphere templates ..."))
+
+        template_names = []
+        if self.config.template_name:
+            template_names.append(self.config.template_name)
+
+        for vm in self.vms:
+            template_name = vm.vm_template
+            if template_name:
+                if template_name not in template_names:
+                    template_names.append(template_name)
+            else:
+                LOG.error(_("VM {!r} has not template defined.").format(vm.name))
+                self.eval_errors += 1
+
+        LOG.debug(_("All vSphere templates to explore:") + "\n" + pp(template_names))
+
+        for template_name in template_names:
+
+            vm_info = self.vsphere.get_vm(template_name)
+            if vm_info:
+                tname = template_name.lower()
+                if tname not in self.vsphere_templates:
+                    self.vsphere_templates[tname] = vm_info
+            else:
+                self.eval_errors += 1
+
+        if self.verbose > 2:
+            LOG.debug(_("All explored vSphere templates:") + "\n" + pp(self.vsphere_templates))
+
+    # -------------------------------------------------------------------------·
+    def validate_clusters(self):
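+        """Check that every computing cluster referenced by a VM exists in vSphere."""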
+
+        print()
+        LOG.info(_("Validating existence of computing clusters of the VMs."))
+
+        clusters = {}
+
+        for vm in self.vms:
+
+            if vm.cluster in clusters:
+                clusters[vm.cluster].append(vm.name)
+            else:
+                clusters[vm.cluster] = [vm.name]
+
+        for cluster in clusters.keys():
+
+            cl = str(cluster)
+            LOG.debug(_("Checking existence of computing cluster {!r} ...").format(cl))
+
+            vmw_cluster = self.vsphere.get_cluster_by_name(cl)
+            if vmw_cluster:
+                if self.verbose > 2:
+                    LOG.debug(
+                        _("Found computing cluster {cl!r} (defined for VMs {vms}).").format(
+                            cl=vmw_cluster.name, vms=pp(clusters[cluster])))
+            else:
+                LOG.error(
+                    _("Computing cluster {cl!r} (defined for VMs {vms}) not found.").format(
+                        cl=cl, vms=pp(clusters[cluster])))
+                self.eval_errors += 1
+
+    # -------------------------------------------------------------------------·
+    def validate_vms(self):
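+        """Check which of the defined VMs already exist in vSphere."""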
+
+        print()
+        LOG.info(_("Validating existence of VMs in VMWare."))
+        vms2perform = []
+
+        for vm in sorted(self.vms, key=lambda x: x.tf_name):
+
+            print(" * {} ".format(vm.fqdn), end='', flush=True)
+            if self.verbose:
+                print()
+
+            vm_info = self.vsphere.get_vm(vm.name, no_error=True)
+            if vm_info:
+                print(
+                    _('[{}] - VM already exists.').format(
+                        self.colored('Existing', 'YELLOW')), end='', flush=True)
+                if self.verbose > 0:
+                    print()
+                if self.verbose > 2:
+                    LOG.debug(_("VM info:") + "\n" + pp(vm_info))
+                match = self.re_vm_path.search(vm_info['vmPathName'])
+                if match:
+                    ds = match.group(1)
+                    LOG.debug(_("Datastore of VM {vm!r}: {ds!r}.").format(
+                        vm=vm.name, ds=ds))
+                    vm.datastore = ds
+                    vm.already_existing = True
+                    self.existing_vms.append(vm_info)
+                else:
+                    LOG.error(_("Did not found datastore of existing VM {!r}.").format(vm.name))
+            else:
+                print('[{}] '.format(self.colored('OK', 'GREEN')), end='', flush=True)
+                vm.already_existing = False
+            vms2perform.append(vm)
+            print()
+
+        self.vms = vms2perform
+
+        print()
+
+        if not len(self.vms):
+            print()
+            print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
+            print(self.colored('*  ' + _('CAUTION!'), ('BOLD', 'RED')), file=sys.stderr)
+            print(self.colored('*' * 60, ('BOLD', 'RED')), file=sys.stderr)
+            print()
+            print(
+                self.colored(_('Did not find any VM to deploy!'), ('BOLD', 'RED')),
+                file=sys.stderr)
+            print()
+            raise ExpectedHandlerError(_("No VMs to deploy"))
+
+    # -------------------------------------------------------------------------·
+    def validate_storages(self):
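+        """Validate the datastore clusters and datastores of all VMs."""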
+
+        self._validate_ds_clusters()
+        self._validate_datastores()
+
+        if self.verbose:
+            if self.used_dc_clusters:
+                out = '\n'.join(map(lambda x: '  * {}'.format(x), self.used_dc_clusters))
+                LOG.debug(_("Used datastore clusters:") + "\n" + out)
+            else:
+                LOG.debug(_("No datastore clusters are used."))
+            if self.used_datastores:
+                out = '\n'.join(map(lambda x: '  * {}'.format(x), self.used_datastores))
+                LOG.debug(_("Used datastors:") + "\n" + out)
+            else:
+                LOG.debug(_("No datastores are used."))
+
+    # -------------------------------------------------------------------------·
+    def _validate_ds_clusters(self):
+
+        LOG.info(_("Validating given datastore clusters of VMs ..."))
+
+        for vm in self.vms:
+
+            if not vm.ds_cluster:
+                continue
+
+            self._validate_dscluster_vm(vm)
+
+    # -------------------------------------------------------------------------·
+    def _validate_dscluster_vm(self, vm):
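+        """
+        Check that the datastore cluster of the VM exists and offers enough
+        free space for all new disks, and book the needed space.
+        """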
+
+        needed_gb = 0.0
+        if not vm.already_existing:
+            for unit_number in vm.disks.keys():
+                disk = vm.disks[unit_number]
+                needed_gb += disk.size_gb
+
+        found = False
+        for cluster_name in self.vsphere.ds_clusters.keys():
+            if cluster_name.lower() == vm.ds_cluster.lower():
+                if self.verbose > 2:
+                    LOG.debug(_("Found datastore cluster {c!r} for VM {n!r}.").format(
+                        n=vm.name, c=vm.ds_cluster))
+                if vm.ds_cluster != cluster_name:
+                    LOG.debug(_("Setting datastore cluster for VM {n!r} to {c!r} ...").format(
+                        n=vm.name, c=cluster_name))
+                    vm.ds_cluster = cluster_name
+                ds_cluster = self.vsphere.ds_clusters[cluster_name]
+                if self.verbose > 2:
+                    LOG.debug(_(
+                        "Free space in cluster {c!r} before provisioning: "
+                        "{a:0.1f} GiB.").format(
+                        c=cluster_name, a=ds_cluster.avail_space_gb))
+                if ds_cluster.avail_space_gb < needed_gb:
+                    LOG.error(_(
+                        "Datastore cluster {d!r} has not sufficient space for storage of VM "
+                        "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
+                            d=cluster_name, v=vm.name, n=needed_gb, a=ds_cluster.avail_space_gb))
+                    self.eval_errors += 1
+                else:
+                    ds_cluster.calculated_usage += needed_gb
+                    if self.verbose > 1:
+                        LOG.debug(_(
+                            "Free space in cluster {c!r} after provisioning: "
+                            "{a:0.1f} GiB.").format(
+                            c=cluster_name, a=ds_cluster.avail_space_gb))
+                found = True
+                if cluster_name not in self.used_dc_clusters:
+                    self.used_dc_clusters.append(cluster_name)
+                break
+
+        if not found:
+            LOG.error(_("Datastore cluster {c!r} of VM {n!r} not found.").format(
+                n=vm.name, c=vm.ds_cluster))
+            self.eval_errors += 1
+
+    # -------------------------------------------------------------------------·
+    def _validate_datastores(self):
+
+        LOG.info(_("Validating given datastores of VMs and assign failing ..."))
+
+        for vm in self.vms:
+
+            if vm.ds_cluster:
+                if vm.datastore:
+                    LOG.debug(_("Removing defined datastore {d!r} for VM {n!r} ...").format(
+                        d=vm.datastore, n=vm.name))
+                    vm.datastore = None
+                continue
+
+            self._validate_ds_vm(vm)
+
+    # -------------------------------------------------------------------------·
+    def _validate_ds_vm(self, vm):
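+        """
+        Check the given datastore of the VM (existence, availability in the
+        cluster and free space) or select a suitable datastore automatically.
+        """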
+
+        needed_gb = 0.0
+        if not vm.already_existing:
+            for unit_number in vm.disks.keys():
+                disk = vm.disks[unit_number]
+                needed_gb += disk.size_gb
+
+        vm_cluster = None
+        for cluster in self.vsphere.clusters:
+            if cluster.name.lower() == vm.cluster.lower():
+                vm_cluster = cluster
+                break
+        if not vm_cluster:
+            msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+                c=vm.cluster, n=vm.name)
+            raise HandlerError(msg)
+
+        if vm.datastore:
+            found = False
+            for ds_name in self.vsphere.datastores:
+                if ds_name.lower() == vm.datastore.lower():
+                    if self.verbose > 2:
+                        LOG.debug(_("Found datastore {d!r} for VM {n!r}.").format(
+                            n=vm.name, d=vm.datastore))
+                    if ds_name not in vm_cluster.datastores:
+                        LOG.warn(_("Datastore {d!r} not available in cluster {c!r}.").format(
+                            d=ds_name, c=vm.cluster))
+                        break
+                    if vm.datastore != ds_name:
+                        LOG.debug(_("Setting datastore for VM {n!r} to {d!r} ...").format(
+                            n=vm.name, d=ds_name))
+                        vm.datastore = ds_name
+                    ds = self.vsphere.datastores[ds_name]
+                    if ds.avail_space_gb < needed_gb:
+                        LOG.error(_(
+                            "Datastore {d!r} has not sufficient space for storage of VM "
+                            "{v!r} (needed {n:0.1f} GiB, available {a:0.1f} GiB).").format(
+                                d=ds_name, v=vm.name, n=needed_gb, a=ds.avail_space_gb))
+                        self.eval_errors += 1
+                    else:
+                        ds.calculated_usage += needed_gb
+                    found = True
+                    break
+            if not found:
+                LOG.error(_("Datastore {d!r} of VM {n!r} not found.").format(
+                    n=vm.name, d=vm.datastore))
+                self.eval_errors += 1
+            return
+
+        ds_name = self.vsphere.datastores.find_ds(
+            needed_gb, vm.ds_type, use_ds=copy.copy(vm_cluster.datastores), no_k8s=True)
+        if ds_name:
+            LOG.debug(_("Found datastore {d!r} for VM {v!r}.").format(d=ds_name, v=vm.name))
+            vm.datastore = ds_name
+            if ds_name not in self.used_datastores:
+                self.used_datastores.append(ds_name)
+        else:
+            self.eval_errors += 1
+
+    # -------------------------------------------------------------------------·
+    def validate_interfaces(self):
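+        """Validate all VM interfaces and collect the used networks and DNS mappings."""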
+
+        LOG.info(_("Validating interfaces of VMs and assign networks ..."))
+        for vm in self.vms:
+            self._validate_interfaces_vm(vm)
+
+        if self.verbose > 2:
+            LOG.debug(_("Validated FQDNs:") + "\n" + pp(self.fqdns))
+            LOG.debug(_("Validated Addresses:") + "\n" + pp(self.addresses))
+
+        if self.verbose:
+
+            out = '\n'.join(map(lambda x: '  * {}'.format(x), self.used_networks))
+            LOG.debug(_("Used networks:") + "\n" + out)
+
+            lines = []
+            for pair in self.dns_mapping['forward']:
+                line = '  * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+                lines.append(line)
+            LOG.debug(_("Used forward DNS entries:") + "\n" + '\n'.join(lines))
+
+            lines = []
+            for pair in self.dns_mapping['reverse']:
+                line = '  * {a!r} => {n!r}'.format(n=pair[1], a=str(pair[0]))
+                lines.append(line)
+            LOG.debug(_("Used reverse DNS entries:") + "\n" + '\n'.join(lines))
+
+    # -------------------------------------------------------------------------·
+    def _validate_interfaces_vm(self, vm):
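+        """
+        Validate the interfaces of one VM: unique FQDNs and addresses and a
+        resolvable network, gateway and netmask for every interface.
+        """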
+
+        LOG.debug(_("Checking interfaces of VM {!r} ...").format(vm.name))
+
+        if not vm.interfaces:
+            LOG.error(_("No interfaces defined for VM {!r}.").format(vm.name))
+            self.eval_errors += 1
+            return
+
+        vm_cluster = None
+        for cluster in self.vsphere.clusters:
+            if cluster.name.lower() == vm.cluster.lower():
+                vm_cluster = cluster
+                break
+        if not vm_cluster:
+            msg = _("Did not found cluster object {c!r} for VM {n!r}.").format(
+                c=vm.cluster, n=vm.name)
+            raise HandlerError(msg)
+
+        i = -1
+        for iface in vm.interfaces:
+
+            i += 1
+            if self.verbose > 1:
+                LOG.debug(_("Checking interface {i} of VM {n!r} ...").format(
+                    i=i, n=vm.name))
+            if not iface.address:
+                LOG.error(_("Interface {i} of VM {n!r} has no defined address.").format(
+                    i=i, n=vm.name))
+                self.eval_errors += 1
+                continue
+
+            if not iface.fqdn:
+                LOG.error(_("Interface {i} of VM {n!r} has no defined FQDN.").format(
+                    i=i, n=vm.name))
+                self.eval_errors += 1
+                continue
+
+            if iface.fqdn in self.fqdns:
+                LOG.error(_(
+                    "FQDN {f!r} already defined for VM {va!r}({ia}) should be set "
+                    "for interface {ib} of {vb!r}.").format(
+                    f=iface.fqdn, va=self.fqdns[iface.fqdn][0], ia=self.fqdns[iface.fqdn][1],
+                    ib=i, vb=vm.name))
+                self.eval_errors += 1
+                continue
+
+            self.fqdns[iface.fqdn] = (vm.name, i)
+
+            if iface.address_v4:
+                if iface.address_v4 in self.addresses:
+                    LOG.error(_(
+                        "IPv4 address {a} already defined for VM {va!r}({ia}) should be set "
+                        "for interface {ib} of {vb!r}.").format(
+                        a=iface.address_v4, va=self.fqdns[iface.fqdn][0],
+                        ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm.name))
+                    self.eval_errors += 1
+                    continue
+                self.addresses[iface.address_v4] = (vm.name, i)
+                pair = (iface.fqdn, iface.address_v4)
+                self.dns_mapping['forward'].append(pair)
+                pair = (iface.address_v4, iface.fqdn)
+                self.dns_mapping['reverse'].append(pair)
+
+            if iface.address_v6:
+                if iface.address_v6 in self.addresses:
+                    LOG.error(_(
+                        "IPv6 address {a} already defined for VM {va!r}({ia}) should be set "
+                        "for interface {ib} of {vb!r}.").format(
+                        a=iface.address_v6, va=self.fqdns[iface.fqdn][0],
+                        ia=self.fqdns[iface.fqdn][1], ib=i, vb=vm.name))
+                    self.eval_errors += 1
+                    continue
+                self.addresses[iface.address_v6] = (vm.name, i)
+                pair = (iface.fqdn, iface.address_v6)
+                self.dns_mapping['forward'].append(pair)
+                pair = (iface.address_v6, iface.fqdn)
+                self.dns_mapping['reverse'].append(pair)
+
+            network = iface.network
+            if network:
+                if network not in self.vsphere.networks:
+                    LOG.error(_(
+                        "Could not find network {n!r} for VM {v!r}, interface {i}.").format(
+                        n=network, v=vm.name, i=i))
+                    self.eval_errors += 1
+                    continue
+            else:
+                network = self.vsphere.networks.get_network_for_ip(
+                    iface.address_v4, iface.address_v6)
+                if not network:
+                    self.eval_errors += 1
+                    continue
+                iface.network = network
+            LOG.debug(_("Found network {n!r} for interface {i} of VM {v!r}.").format(
+                n=network, i=i, v=vm.name))
+
+            if network not in vm_cluster.networks:
+                LOG.error(_(
+                    "Network {n!r} for interface {i} of VM {v!r} not available in "
+                    "cluster {c!r}.").format(n=network, v=vm.name, i=i, c=vm_cluster.name))
+                self.eval_errors += 1
+                continue
+            LOG.debug(_("Network {n!r} is available in cluster {c!r}.").format(
+                n=network, c=vm_cluster.name))
+
+            net = self.vsphere.networks[network]
+            if not iface.gateway:
+                LOG.debug(_("Setting gateway of interface {i} of VM {v!r} to {g}.").format(
+                    i=i, v=vm.name, g=net.gateway))
+                iface.gateway = net.gateway
+
+            if net.network:
+                if net.network.version == 4:
+                    if iface.netmask_v4 is None:
+                        iface.netmask_v4 = net.network.prefixlen
+                else:
+                    if iface.netmask_v6 is None:
+                        iface.netmask_v6 = net.network.prefixlen
+
+            if network not in self.used_networks:
+                self.used_networks.append(network)
+
+    # -------------------------------------------------------------------------·
+    def validate_dns_mappings(self):
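+        """Determine which forward and reverse DNS records still have to be created."""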
+
+        LOG.info(_("Validating DNS mappings ..."))
+        self._validate_forward_dns_mappings()
+        self._validate_reverse_dns_mappings()
+
+        lines = []
+        for pair in self.dns_mappings2create['forward']:
+            line = '  * {n!r} => {a!r}'.format(n=pair[0], a=str(pair[1]))
+            lines.append(line)
+        LOG.info(_("Forward DNS entries to create:") + "\n" + '\n'.join(lines))
+
+        lines = []
+        for pair in self.dns_mappings2create['reverse']:
+            line = '  * {r} ({a!r}) => {n!r}'.format(
+                r=pair[0].reverse_pointer, n=pair[1], a=str(pair[0]))
+            lines.append(line)
+        LOG.info(_("Reverse DNS entries to create:") + "\n" + '\n'.join(lines))
+
+    # -------------------------------------------------------------------------·
+    def _validate_forward_dns_mappings(self):
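+        """Check the planned A/AAAA records against the currently resolvable addresses."""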
+
+        if not self.dns_mapping['forward']:
+            return
+
+        LOG.debug(_("Validating forward DNS mappings ..."))
+
+        for (fqdn, address) in self.dns_mapping['forward']:
+
+            if self.verbose > 1:
+                LOG.debug(_("Validating {f!r} => {a!r}.").format(f=fqdn, a=str(address)))
+
+            results_v4 = []
+            results_v6 = []
+
+            try:
+                addr_infos = socket.getaddrinfo(fqdn, 80)
+            except socket.gaierror:
+                addr_infos = []
+
+            for addr_info in addr_infos:
+                if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
+                    continue
+                addr = ipaddress.ip_address(addr_info[4][0])
+                if addr.version == 4:
+                    if addr not in results_v4:
+                        results_v4.append(addr)
+                else:
+                    if addr not in results_v6:
+                        results_v6.append(addr)
+            if self.verbose > 2:
+                if results_v4 or results_v6:
+                    lines = []
+                    for addr in results_v4 + results_v6:
+                        lines.append('  * {}'.format(str(addr)))
+                    out = '\n'.join(lines)
+                    LOG.debug(_("Found existing addresses for {f!r}:").format(f=fqdn) + '\n' + out)
+                else:
+                    LOG.debug(_("Did not found existing addresses for {!r}.").format(fqdn))
+
+            if address.version == 4:
+                if not results_v4:
+                    self.dns_mappings2create['forward'].append((fqdn, address))
+                    continue
+                if address in results_v4:
+                    LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+                        f=fqdn, a=str(address)))
+                    continue
+            else:
+                if not results_v6:
+                    self.dns_mappings2create['forward'].append((fqdn, address))
+                    continue
+                if address in results_v6:
+                    LOG.debug(_("FQDN {f!r} already points to {a!r}.").format(
+                        f=fqdn, a=str(address)))
+                    continue
+
+            alist = '\n'.join(map(lambda x: '  * {}'.format(str(x)), results_v4 + results_v6))
+            msg = (_(
+                "FQDN {f!r} has already existing addresses, "
+                "but none of them are {a!r}:").format(f=fqdn, a=str(address)) + "\n" + alist)
+            if self.ignore_existing_dns:
+                LOG.warn(msg)
+                self.dns_mappings2create['forward'].append((fqdn, address))
+            else:
+                LOG.error(msg)
+                self.eval_errors += 1
+
+    # -------------------------------------------------------------------------·
+    def _validate_reverse_dns_mappings(self):
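+        """Check the planned PTR records against the currently existing reverse pointers."""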
+
+        if not self.dns_mapping['reverse']:
+            return
+
+        LOG.debug(_("Validating reverse DNS mappings ..."))
+
+        for (address, fqdn) in self.dns_mapping['reverse']:
+
+            if self.verbose > 1:
+                LOG.debug(_("Validating {a!r} => {f!r}.").format(f=fqdn, a=str(address)))
+
+            try:
+                info = socket.gethostbyaddr(str(address))
+            except socket.herror:
+                info = []
+            if self.verbose > 2:
+                LOG.debug(_("Got reverse info:") + "\n" + str(info))
+            ptr = None
+            if info:
+                ptr = info[0]
+
+            if not ptr:
+                if self.verbose > 1:
+                    LOG.debug(_("Did not found reverse pointer for {!r}.").format(str(address)))
+                self.dns_mappings2create['reverse'].append((address, fqdn))
+                continue
+
+            ptr = RE_DOT_AT_END.sub('', ptr).lower()
+            fqdn_canon = RE_DOT_AT_END.sub('', fqdn).lower()
+
+            if self.verbose > 1:
+                LOG.debug(_("Found reverse pointer {a!r} => {f!r}.").format(f=ptr, a=str(address)))
+            if fqdn_canon == ptr:
+                if self.verbose > 1:
+                    LOG.debug(_("Reverse pointer for {!r} was already existing.").format(
+                        str(address)))
+                continue
+
+            LOG.error(_("Address {a!r} has already an existing reverse pointer to {p!r}.").format(
+                a=str(address), p=ptr))
+            self.eval_errors += 1
+
+    # -------------------------------------------------------------------------·
+    def get_tf_name_network(self, net_name, *args):
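+        """Map a network name to its terraform name, with dict.get()-like default handling."""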
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_network', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if net_name in self.vsphere.network_mapping:
+            return self.vsphere.network_mapping[net_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found network {!r}.").format(net_name))
+
+    # --------------------------------------------------------------------------
+    def get_tf_name_ds_cluster(self, dsc_name, *args):
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_ds_cluster', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if dsc_name in self.vsphere.ds_cluster_mapping:
+            return self.vsphere.ds_cluster_mapping[dsc_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found datastore cluster {!r}.").format(dsc_name))
+
+    # --------------------------------------------------------------------------
+    def get_tf_name_datastore(self, ds_name, *args):
+
+        default = None
+        has_default = False
+        if len(args):
+            if len(args) > 1:
+                msg = ngettext(
+                    "Method {c}.{m} expected at most one argument, got {n}.",
+                    "Method {c}.{m} expected at most {e} arguments, got {n}.", 2).format(
+                    c=self.__class__.__name__, e=2, m='get_tf_name_datastore', n=len(args))
+                raise TypeError(msg)
+            default = args[0]
+            has_default = True
+
+        if ds_name in self.vsphere.ds_mapping:
+            return self.vsphere.ds_mapping[ds_name]
+        if has_default:
+            return default
+        raise KeyError(_("Did not found datastore {!r}.").format(ds_name))
+
+    # --------------------------------------------------------------------------
+    def perform_dns(self):
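+        """Create all collected DNS records and increase the serials of the updated zones."""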
+
+        print()
+        LOG.info(_("Performing DNS actions ..."))
+        print()
+
+        # TODO: Check for simulate and mappings to create
+
+        errors = 0
+
+        for (fqdn, address) in self.dns_mappings2create['forward']:
+            if not self._perform_dns_forward(fqdn, address):
+                errors += 1
+
+        for (address, fqdn) in self.dns_mappings2create['reverse']:
+            if not self._perform_dns_reverse(address, fqdn):
+                errors += 1
+
+        if errors:
+            msg = ngettext(
+                "There was one error in creating DNS mappings.",
+                "There were {n} errors in creating DNS mappings.", errors).format(n=errors)
+            raise ExpectedHandlerError(msg)
+        else:
+            if self.verbose > 1:
+                LOG.debug(_("No errors in creating DNS mappings."))
+
+        print()
+
+        for zone_name in self.updated_zones:
+            self._increase_zone_serial(zone_name)
+
+    # --------------------------------------------------------------------------
+    def _increase_zone_serial(self, zone_name):
+
+        LOG.info(_("Increasing serial of zone {!r}.").format(zone_name))
+
+        zone = self.pdns.zones[zone_name]
+        zone.increase_serial()
+        zone.notify()
+
+    # --------------------------------------------------------------------------
+    def _perform_dns_forward(self, fqdn, address):
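+        """Create an A or AAAA record for the FQDN in the matching zone, if one is found."""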
+
+        record_type = 'A'
+        addr_obj = ipaddress.ip_address(address)
+        if addr_obj.version == 6:
+            record_type = 'AAAA'
+
+        canon_fqdn = self.pdns.canon_name(fqdn)
+
+        zone_name = self.pdns.get_zone_for_item(canon_fqdn, is_fqdn=True)
+        if zone_name:
+            if self.verbose > 1:
+                LOG.debug(_("Got zone {z!r} for FQDN {f!r}.").format(
+                    z=zone_name, f=canon_fqdn))
+        else:
+            LOG.error(_("Did not found zone to insert {t}-record for {f!r}.").format(
+                t=record_type, f=fqdn))
+            return False
+
+        zone = self.pdns.zones[zone_name]
+        if addr_obj.is_private:
+            zone.add_address_record(
+                fqdn, address, set_ptr=False, comment='local',
+                account=self.config.pdns_comment_account, append_comments=True)
+        else:
+            zone.add_address_record(fqdn, address, set_ptr=False)
+        if zone_name not in self.updated_zones:
+            self.updated_zones.append(zone_name)
+        return True
+
+    # --------------------------------------------------------------------------
+    def _perform_dns_reverse(self, address, fqdn):
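+        """Create a PTR record for the address in its reverse zone, if such a zone exists."""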
+
+        LOG.debug(_("Trying to create PTR-record {a!r} => {f!r}.").format(
+            f=fqdn, a=str(address)))
+
+        pointer = self.pdns.canon_name(address.reverse_pointer)
+        if self.verbose > 1:
+            LOG.debug(_("PTR of {a!r}: {p!r}.").format(a=str(address), p=pointer))
+
+        zone_name = self.pdns.get_zone_for_item(pointer, is_fqdn=True)
+        if zone_name:
+            if self.verbose > 1:
+                LOG.debug(_("Got reverse zone {z!r} for address {a!r}.").format(
+                    z=zone_name, a=str(address)))
+        else:
+            LOG.warn(_("Did not found zone to insert PTR-record {p!r} ({a}).").format(
+                p=pointer, a=str(address)))
+            return True
+
+        zone = self.pdns.zones[zone_name]
+        zone.add_ptr_record(pointer, fqdn)
+        if zone_name not in self.updated_zones:
+            self.updated_zones.append(zone_name)
+        return True
+
+    # --------------------------------------------------------------------------
+    def ensure_project_dir(self):
+
+        print()
+        LOG.info(_("Ensuring existence of directory {!r}.").format(str(self.project_dir)))
+
+        if self.project_dir.exists():
+            if self.project_dir.is_dir():
+                LOG.debug(_("Directory {!r} already exists.").format(str(self.project_dir)))
+            else:
+                msg = _("Path {!r} exists, but is not a directory.").format(str(self.project_dir))
+                raise ExpectedHandlerError(msg)
+        else:
+            LOG.info(_("Creating directory {!r} ...").format(str(self.project_dir)))
+            if self.simulate:
+                LOG.debug(_("Simulation mode - directory will not be created."))
+            else:
+                try:
+                    os.makedirs(str(self.project_dir), mode=0o755)
+                except PermissionError as e:
+                    msg = _("Could not create directory {d!r}: {e}").format(
+                        d=str(self.project_dir), e=e)
+                    raise ExpectedHandlerError(msg)
+
+        if not self.project_dir.exists():
+            if self.simulate:
+                return
+            else:
+                msg = _("Directory {!r} does not exists ?!?!").format(str(self.project_dir))
+                raise ExpectedHandlerError(msg)
+
+        if not os.access(str(self.project_dir), os.W_OK):
+            msg = _("No write access to directory {!r}.").format(str(self.project_dir))
+            raise ExpectedHandlerError(msg)
+
+        LOG.debug(_("Changing into directory {!r}.").format(str(self.project_dir)))
+        os.chdir(str(self.project_dir))
+
+        self.script_dir_rel = Path(os.path.relpath(
+            str(self.script_dir), str(self.project_dir)))
+        LOG.debug(_("Script-Dir relative to project dir: {!r}.").format(str(self.script_dir_rel)))
+
+        if self.verbose > 1:
+            LOG.debug(_("Checking {!r} for a previous terraform configuration.").format(
+                str(self.project_dir)))
+        if os.path.exists('.terraform') and not os.path.isdir('.terraform'):
+            msg = _("In {d!r} there exists already {w!r}, but this is not a directory.").format(
+                d=str(self.project_dir), w='.terraform')
+            raise ExpectedHandlerError(msg)
+        if os.path.exists('terraform.tfstate') and not os.path.isfile('terraform.tfstate'):
+            msg = _("In {d!r} there exists already {w!r}, but this not a file.").format(
+                d=str(self.project_dir), w='terraform.tfstate')
+            raise ExpectedHandlerError(msg)
+        if os.path.isdir('.terraform') and os.path.isfile('terraform.tfstate'):
+            msg = _(
+                "In directory {d!r} there are already existing both {w1!r} and {w2!r}. "
+                "Is this an old terraform project?").format(
+                    d=str(self.project_dir), w1='.terraform', w2='terraform.tfstate')
+            raise ExpectedHandlerError(msg)
+
+    # --------------------------------------------------------------------------
+    def clean_project_dir(self):
+
+        print()
+        LOG.info(_("Cleaning project directory {!r}.").format(str(self.project_dir)))
+
+        files = glob.glob('*') + glob.glob('.terraform')
+        if not files:
+            LOG.debug(_("Directory {!r} is already clean.").format(str(self.project_dir)))
+            return
+        for pfile in files:
+            if os.path.isdir(pfile):
+                LOG.debug(_("Removing recursive directory {!r} ...").format(pfile))
+                if not self.simulate:
+                    shutil.rmtree(pfile)
+            else:
+                LOG.debug(_("Removing {!r} ...").format(pfile))
+                if not self.simulate:
+                    os.remove(pfile)
+
+    # --------------------------------------------------------------------------
+    def create_terraform_files(self):
+
+        print()
+        LOG.info(_("Creating all necessary files for terraform."))
+
+        self.create_varfiles()
+        self.create_dcfile()
+        self.create_backend_file()
+        self.create_instance_files()
+
+    # --------------------------------------------------------------------------
+    def create_varfiles(self):
+
+        content = textwrap.dedent('''\
+        ## filename: terraform.tfvars
+        ## This file declares the values for the variables to be used in the instance.*.tf files
+
+        #
+        # ATTENTION!
+        #
+        # To avoid annoying questions for password and API key
+        # manually create a file 'terraform-private.auto.tfvars'
+        # with the following content:
+        #
+        #   vsphere_password    = "<PASSWORD>"
+        #   pdns_api_key        = "<API-KEY>"
+        #
+        # with the correct values. This file will not be under Git control.
+        #
+
+        ''')
+
+        LOG.debug(_("Creating {!r} ...").format('terraform.tfvars'))
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('terraform.tfvars', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('terraform.tfvars', self.std_file_permissions)
+
+        tpl = textwrap.dedent('''\
+        # Private sensitive information. Please keep this file secret.
+
+        vsphere_user     = "{u}"
+        vsphere_password = "{p}"
+        pdns_api_key     = "{a}"
+
+        ''')
+
+        content = tpl.format(
+            u=self.config.vsphere_user, p=self.config.vsphere_password,
+            a=self.config.pdns_api_key)
+
+        LOG.debug(_("Creating {!r} ...").format('private.auto.tfvars'))
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('private.auto.tfvars', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('private.auto.tfvars', self.std_secure_file_permissions)
+
+        content = textwrap.dedent('''\
+        # filename: variables.tf
+        # Definition of the variables to be used in the plan.
+        # Their values are declared in the files terraform.tfvars and private.auto.tfvars.
+
+        ''')
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_vcenter" {{
+          default     = "{}"
+          description = "IP or DNS of the vSphere center."
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(self.config.vsphere_host)
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_user" {
+          default     = "Administrator@vsphere.local"
+          description = "vSphere accountname to be used."
+          type        = string
+        }
+
+        variable "vsphere_password" {
+          description = "Password for vSphere accountname."
+          type        = string
+        }
+
+        ''')
+        content += tpl
+
+        tpl = textwrap.dedent('''\
+        variable "vsphere_datacenter" {{
+          default     = "{dc}"
+          description = "Name of the vSphere datacenter to use."
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(dc=self.config.vsphere_dc)
+
+#        i = 0
+#        for cluster in self.vsphere.clusters:
+#            i += 1
+#            tpl = textwrap.dedent('''\
+#            variable "{v}" {{
+#                default     = "{cl}"
+#                description = "Name of the vSphere host cluster {i} to use."
+#                type        = "string"
+#            }}
+#
+#            ''')
+#            content += tpl.format(v=cluster.var_name, cl=cluster.name, i=i)
+
+        tpl = textwrap.dedent('''\
+        variable "timezone" {{
+          default     = "{tz}"
+          description = "The global timezone used for VMs"
+          type        = string
+        }}
+
+        ''')
+        content += tpl.format(tz=self.tz_name)
+
+        tpl = textwrap.dedent('''\
+        variable "pdns_api_key" {
+          description = "Key for accessing the PowerDNS-API"
+          type        = string
+        }
+
+        ''')
+        content += tpl
+
+        LOG.debug(_("Creating {!r} ...").format('variables.tf'))
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('variables.tf', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('variables.tf', self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_dcfile(self):
+
+        LOG.debug(_("Creating {!r} ...").format('dc.tf'))
+
+        content = textwrap.dedent('''\
+        # filename: dc.tf
+        # Configuring the VMware vSphere provider and some commonly used dependent objects
+
+        provider "vsphere" {
+          vsphere_server       = var.vsphere_vcenter
+          user                 = var.vsphere_user
+          password             = var.vsphere_password
+          allow_unverified_ssl = true
+        ''')
+
+        if self.min_version_vsphere_provider:
+            content += '  version              = ">= {}"\n'.format(
+                str(self.min_version_vsphere_provider))
+
+        content += textwrap.dedent('''\
+        }
+
+        data "vsphere_datacenter" "dc" {
+          name = var.vsphere_datacenter
+        }
+
+        ''')
+
+        for cluster in self.vsphere.clusters:
+            tpl = textwrap.dedent('''\
+            data "vsphere_resource_pool" "{pv}" {{
+              name          = "{pn}"
+              datacenter_id = data.vsphere_datacenter.dc.id
+            }}
+
+            ''')
+            content += tpl.format(
+                pv=cluster.resource_pool_var, pn=cluster.resource_pool_name)
+
+        if self.used_dc_clusters:
+            for dsc_name in sorted(self.used_dc_clusters, key=str.lower):
+                dsc_tf_name = self.vsphere.ds_cluster_mapping[dsc_name]
+                tpl = textwrap.dedent('''\
+                data "vsphere_datastore_cluster" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=dsc_tf_name, n=dsc_name)
+
+        if self.used_datastores:
+            for ds_name in sorted(self.used_datastores, key=str.lower):
+                ds_tf_name = self.vsphere.ds_mapping[ds_name]
+                tpl = textwrap.dedent('''\
+                data "vsphere_datastore" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=ds_tf_name, n=ds_name)
+
+        for net_name in sorted(self.used_networks, key=str.lower):
+            net_tf_name = self.vsphere.network_mapping[net_name]
+            tpl = textwrap.dedent('''\
+            data "vsphere_network" "{tn}" {{
+              name          = "{n}"
+              datacenter_id = data.vsphere_datacenter.dc.id
+            }}
+
+            ''')
+            content += tpl.format(n=net_name, tn=net_tf_name)
+
+        if self.vsphere_templates:
+            for tname in sorted(self.vsphere_templates.keys(), key=str.lower):
+                tpl_tf_name = self.vsphere_templates[tname]['tf_name']
+                tpl = textwrap.dedent('''\
+                data "vsphere_virtual_machine" "{tn}" {{
+                  name          = "{n}"
+                  datacenter_id = data.vsphere_datacenter.dc.id
+                }}
+
+                ''')
+                content += tpl.format(tn=tpl_tf_name, n=tname)
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open('dc.tf', 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod('dc.tf', self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_backend_file(self):
+
+        file_name = 'backend.tf'
+        LOG.debug(_("Creating {!r} ...").format(file_name))
+
+        tpl = textwrap.dedent('''\
+        # Configuration of the backend for storing the Terraform state
+        # and of the minimum required version of Terraform
+
+        terraform {{
+          backend "consul" {{
+            address = "{host}"
+            scheme  = "{scheme}"
+            path    = "{prefix}/{project}"
+          }}
+        ''')
+
+        content = tpl.format(
+            host=self.config.tf_backend_host, scheme=self.config.tf_backend_scheme,
+            prefix=self.config.tf_backend_path_prefix, project=self.project_name)
+
+        if self.min_version_terraform:
+            content += '  required_version = ">= {}"\n'.format(str(self.min_version_terraform))
+        else:
+            LOG.warning(_("No minimum version of Terraform defined."))
+
+        content += '}\n\n'
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open(file_name, 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod(file_name, self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def create_instance_files(self):
+
+        LOG.debug(_("Creating terraform files for VM instances."))
+
+        for vm in sorted(self.vms, key=lambda x: x.tf_name):
+            self.create_instance_file(vm)
+
+    # --------------------------------------------------------------------------
+    def create_instance_file(self, vm):
+
+        fname = 'instance.' + vm.name + '.tf'
+        LOG.debug(_("Creating file {f!r} for VM instance {n!r}.").format(
+            f=fname, n=vm.name))
+
+        guest_id = self.config.guest_id
+        tpl_vm = None
+        if vm.vm_template:
+            tpl_vm = self.vsphere_templates[vm.vm_template]
+            if self.verbose > 3:
+                LOG.debug(_("Using template:") + "\n" + pp(tpl_vm))
+            guest_id = 'data.vsphere_virtual_machine.{}.guest_id'.format(
+                tpl_vm['tf_name'])
+        else:
+            guest_id = '"' + guest_id + '"'
+
+        content = self._create_instfile_general(vm, guest_id, tpl_vm)
+
+        i = 0
+        for iface in vm.interfaces:
+            content += self._create_instfile_if(vm, iface, i, tpl_vm)
+            i += 1
+
+        for unit_id in sorted(vm.disks.keys()):
+            content += self._create_instfile_disk(vm, unit_id)
+
+        content += textwrap.indent(textwrap.dedent('''\
+        cdrom {
+          client_device = "true"
+        }
+
+        '''), '  ')
+
+        content += self._create_instfile_custom(vm, tpl_vm)
+
+        if self.verbose > 1:
+            LOG.debug(_("Writing {!r}").format(fname))
+
+        if self.simulate:
+            if self.verbose:
+                print(content)
+        else:
+            with open(fname, 'w', **self.open_opts) as fh:
+                fh.write(content)
+            os.chmod(fname, self.std_file_permissions)
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_general(self, vm, guest_id, tpl_vm):
+
+        # ## General definitions of VM
+        if self.verbose > 1:
+            LOG.debug(_("Generating global definitions of {!r}.").format(vm.name))
+        content = textwrap.dedent('''\
+        # Definition of the VM instance {!r}.
+
+        ''').format(vm.name)
+
+        cluster = self.vsphere.get_cluster_by_name(vm.cluster)
+        if not cluster:
+            msg = _("Cluster {!r} not found - this shouldn't be happened.").format(
+                vm.cluster)
+            raise RuntimeError(msg)
+
+        content += textwrap.dedent('''\
+        resource "vsphere_virtual_machine" "{tn}" {{
+
+          resource_pool_id       = data.vsphere_resource_pool.{pv}.id
+          name                   = "{n}"
+        ''').format(tn=vm.tf_name, n=vm.name, pv=cluster.resource_pool_var)
+
+        if vm.ds_cluster:
+            dsc_tf_name = self.vsphere.ds_cluster_mapping[vm.ds_cluster]
+            tpl = '  datastore_cluster_id   = data.vsphere_datastore_cluster.{}.id\n'
+            content += tpl.format(dsc_tf_name)
+
+        if vm.datastore:
+            ds_tf_name = self.vsphere.ds_mapping[vm.datastore]
+            tpl = '  datastore_id           = data.vsphere_datastore.{}.id\n'
+            content += tpl.format(ds_tf_name)
+
+        content += textwrap.indent(textwrap.dedent('''\
+          num_cpus               = "{cpu}"
+          folder                 = "{f}"
+          num_cores_per_socket   = "1"
+          cpu_hot_add_enabled    = "true"
+          cpu_hot_remove_enabled = "true"
+          memory                 = "{m}"
+          memory_hot_add_enabled = "true"
+          boot_delay             = "{b}"
+          guest_id               = {g}
+        '''), '  ').format(
+            g=guest_id, cpu=vm.num_cpus, f=vm.folder, m=vm.memory, b=int(vm.boot_delay * 1000))
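+        # Note: vm.boot_delay is apparently given in seconds and converted to
+        # milliseconds above, which is what the vSphere provider seems to expect.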
+        if vm.vm_template:
+            tpl = '  scsi_type              = data.vsphere_virtual_machine.{}.scsi_type\n'
+            content += tpl.format(tpl_vm['tf_name'])
+        content += '\n'
+
+        content += textwrap.indent(textwrap.dedent('''\
+        lifecycle {
+          ignore_changes = all
+        }
+        '''), '  ')
+        content += '\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_if(self, vm, iface, i, tpl_vm):
+
+        # ## Interface definition
+
+        if self.verbose > 1:
+            LOG.debug(_("Generating interface definition {i} of {v!r}.").format(i=i, v=vm.name))
+        nw = iface.network
+        nw_name = self.vsphere.network_mapping[nw]
+
+        content = textwrap.indent(textwrap.dedent('''\
+        network_interface {{
+          network_id   = data.vsphere_network.{n}.id
+          adapter_type = data.{vvm}.{t}.{nit}[0]
+        }}
+        '''), '  ').format(
+            n=nw_name, t=tpl_vm['tf_name'],
+            vvm='vsphere_virtual_machine', nit='network_interface_types')
+        content += '\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_disk(self, vm, unit_id):
+
+        # ## Disk definitions
+        if self.verbose > 1:
+            LOG.debug(_("Generating disk definition {i} of {v!r}.").format(i=unit_id, v=vm.name))
+        disk = vm.disks[unit_id]
+        content = textwrap.indent(textwrap.dedent('''\
+        disk {{
+          label            = "disk{i}"
+          size             = "{s}"
+          eagerly_scrub    = "false"
+          thin_provisioned = "false"
+        '''), '  ').format(i=unit_id, s=int(disk.size_gb))
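+        # Only additional disks (unit_id > 0) get an explicit unit_number in the
+        # generated HCL; the root disk (unit 0) is left at the provider default.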
+        if unit_id > 0:
+            content += '    unit_number      = {}\n'.format(unit_id)
+        content += '  }\n\n'
+
+        return content
+
+    # --------------------------------------------------------------------------
+    def _create_instfile_custom(self, vm, tpl_vm):
+
+        # ## Customization of VM
+        if self.verbose > 1:
+            LOG.debug(_("Generating customization of {v!r}.").format(v=vm.name))
+
+        content = textwrap.indent(textwrap.dedent('''\
+        clone {{
+          template_uuid = data.vsphere_virtual_machine.{t}.id
+
+          customize {{
+            linux_options {{
+              host_name = "{h}"
+              domain    = "{d}"
+              time_zone = var.timezone
+            }}
+
+        '''), '  ').format(
+            t=tpl_vm['tf_name'], h=vm.hostname, d=vm.domain)
+
+        content += self._create_instfile_nw(vm)
+        content += '    }\n'
+        content += '  }\n\n'
+
+        # ## local SSH cleanup before any actions
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {h} || true"
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {i} || true"
+        }}
+
+        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+        # ## Copying postinstall scripts to VM
+
+        files = ['conf-resolver', 'create-motd']
+        if vm.has_puppet:
+            files.append('init-puppet')
+            files.append('update-all-packages')
+
+        for sname in files:
+
+            if self.verbose > 1:
+                LOG.debug(_("Generating file provisioner for {f!r} of {v!r}.").format(
+                    f=sname, v=vm.name))
+
+            content += textwrap.indent(textwrap.dedent('''\
+            provisioner "file" {{
+              source      = "{d}/{f}"
+              destination = "/tmp/{f}"
+              connection {{
+                type = "ssh"
+                user = "root"
+                host = "{h}"
+              }}
+            }}
+
+            '''), '  ').format(
+                d=self.script_dir_rel, f=sname, h=vm.fqdn)
+
+        # ## Postinstall commands on host
+        commands = []
+
+        commands.append("usermod -c 'root {}' root".format(vm.fqdn))
+
+        commands.append("chmod +x /tmp/conf-resolver")
+        commands.append("/tmp/conf-resolver")
+        commands.append("rm -f /tmp/conf-resolver")
+
+        purpose = self.re_doublequote.sub('\\\"', vm.purpose)
+
+        commands.append("chmod +x /tmp/create-motd")
+        commands.append((
+            "/tmp/create-motd --purpose '{p} ({t})' --hardware 'vmware (x86_64)' --owner '{o}' "
+            "--zone 'VMWare' --customer '{c}'  | tee /etc/motd").format(
+                p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer))
+        commands.append("rm -f /tmp/create-motd")
+
+        # ## Backup - Legato networker
+        commands.append("systemctl stop networker.service")
+        commands.append("rm -rfv /nsr/tmp /nsr/res")
+        if vm.has_backup:
+            commands.append("mkdir -pv /nsr/res")
+            commands.append(
+                "if [ ! -f /nsr/res/servers ] ; then "
+                "echo 'legato01.pixelpark.com' > /nsr/res/servers; fi")
+            commands.append("systemctl start networker.service; sleep 2")
+            commands.append("systemctl stop networker.service; sleep 2")
+            commands.append(
+                "systemctl enable networker.service; systemctl start networker.service; sleep 2")
+        else:
+            commands.append("systemctl disable networker.service")
+
+        # ## Configuring and starting puppet
+        if vm.has_puppet:
+            commands.append("chmod +x /tmp/init-puppet")
+            commands.append((
+                "/tmp/init-puppet --environment '{e}' --customer '{c}' --project '{pr}' "
+                "--role '{r}' --owner '{o}' --tier '{t}' --purpose '{p} ({t})' "
+                "--email '{m}'").format(
+                    p=purpose, t=vm.puppet_tier, o=vm.customer, c=vm.puppet_customer,
+                    pr=vm.puppet_project, m=vm.puppet_contact, e=vm.puppet_env, r=vm.puppet_role))
+            commands.append("rm -f /tmp/init-puppet")
+
+        content += '  provisioner "remote-exec" {\n'
+        content += '    inline = [\n'
+        for cmd in commands:
+            content += '      "{}",\n'.format(cmd)
+        content += '    ]\n'
+        content += '    connection {\n'
+        content += '      type = "ssh"\n'
+        content += '      user = "root"\n'
+        content += '      host = "{}"\n'.format(vm.fqdn)
+        content += '    }\n'
+        content += '  }\n\n'
+
+        # ## postconfigure actions with puppet
+        if vm.has_puppet:
+            content += self._create_instfile_puppet(vm)
+
+        # ## local SSH cleanup on destroy
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {h} || true"
+          when    = destroy
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh-keygen -R {i} || true"
+          when    = destroy
+        }}
+        '''), '  ').format(h=vm.fqdn, i=vm.interfaces[0].address)
+
+        content += '}\n\n'
+
+        return content
+
+    # -------------------------------------------------------------------------
+    def _create_instfile_nw(self, vm):
+
+        content = ''
+
+        gw4 = None
+        gw6 = None
+        for iface in vm.interfaces:
+
+            content += "      network_interface {\n"
+            if iface.address_v4:
+                content += '        ipv4_address = "{}"\n'.format(iface.address_v4)
+                if iface.netmask_v4 is not None:
+                    content += '        ipv4_netmask = "{}"\n'.format(iface.netmask_v4)
+            if iface.address_v6:
+                content += '        ipv6_address = "{}"\n'.format(iface.address_v6)
+                if iface.netmask_v6 is not None:
+                    content += '        ipv6_netmask = "{}"\n'.format(iface.netmask_v6)
+            content += '      }\n\n'
+
+            if not gw4:
+                gw4 = iface.gateway_v4
+            if not gw6:
+                gw6 = iface.gateway_v6
+
+        if gw4:
+            content += '      ipv4_gateway    = "{}"\n'.format(gw4)
+        if gw6:
+            content += '      ipv6_gateway    = "{}"\n'.format(gw6)
+
+        ns = ', '.join(map(lambda x: '"{}"'.format(x), vm.nameservers))
+        content += '      dns_server_list = [{}]\n'.format(ns)
+
+        return content
+
+    # -------------------------------------------------------------------------
+    def _create_instfile_puppet(self, vm):
+
+        content = textwrap.indent(textwrap.dedent('''\
+        provisioner "local-exec" {{
+          command = "ssh {ca} 'sudo /opt/puppetlabs/bin/puppet cert sign {h} || true'"
+        }}
+
+        provisioner "remote-exec" {{
+          inline = [
+            "/opt/puppetlabs/bin/puppet agent --test || true",
+            "/usr/bin/systemctl start puppet.service",
+            "/usr/bin/systemctl enable puppet.service",
+            "chmod +x /tmp/update-all-packages",
+            "/tmp/update-all-packages",
+            "rm -f /tmp/update-all-packages",
+          ]
+          connection {{
+            type = "ssh"
+            user = "root"
+            host = "{h}"
+          }}
+        }}
+
+        '''), '  ').format(
+            ca=self.config.puppetca, h=vm.fqdn,
+        )
+
+        # Destroy actions with puppet
+        content += textwrap.indent(textwrap.dedent('''\
+        provisioner "remote-exec" {{
+          inline = [
+            "/usr/bin/systemctl stop puppet.service || true",
+          ]
+          when = destroy
+          connection {{
+            type = "ssh"
+            user = "root"
+            host = "{h}"
+          }}
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh {ma} 'sudo /opt/puppetlabs/bin/puppet node deactivate {h} || true'"
+          when    = destroy
+        }}
+
+        provisioner "local-exec" {{
+          command = "ssh {ca} 'sudo /opt/puppetlabs/bin/puppet cert clean {h} || true'"
+          when    = destroy
+        }}
+
+        '''), '  ').format(
+            ca=self.config.puppetca, h=vm.fqdn, ma=self.config.puppetmaster,
+        )
+
+        return content
+
+    # -------------------------------------------------------------------------
+    def ensure_vsphere_folders(self):
+
+        print()
+        LOG.info(_("Ensuring existence of all necessary vSphere VM folders."))
+        self.vsphere.ensure_vm_folders(copy.copy(self.vsphere_folders))
+
+    # -------------------------------------------------------------------------
+    def exec_terraform(self):
+
+        tf_timeout = 30
+
+        print()
+        LOG.info(_("Executing {!r} ...").format('terraform init'))
+        cmd = [str(self.terraform_cmd), 'init']
+        result = self.run(
+            cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+        LOG.debug(_("Completed process:") + "\n" + str(result))
+
+        if self.existing_vms:
+            print()
+            LOG.info(_("Importing existing virtual machines ..."))
+
+            for vm in self.existing_vms:
+
+                print()
+                LOG.info(_("Importing VM {!r}.").format(vm['name']))
+                vm_obj = 'vsphere_virtual_machine.{}'.format(vm['tf_name'])
+                path = '/{dc}/{f}/{p}/{n}'.format(
+                    dc=self.vsphere.dc, f=self.vsphere.dc_obj.vm_folder,
+                    p=vm['path'], n=vm['name'])
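+                # The resulting vSphere inventory path has the shape
+                # '/<datacenter>/<vm folder>/<relative path>/<vm name>' (placeholders only).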
+                cmd = [str(self.terraform_cmd), 'import', vm_obj, path]
+                result = self.run(
+                    cmd, may_simulate=True, timeout=tf_timeout,
+                    stdout=PIPE, stderr=PIPE, check=True)
+                LOG.debug(_("Completed process:") + "\n" + str(result))
+
+        print()
+        LOG.info(_("Executing {!r} ...").format('terraform plan'))
+        cmd = [str(self.terraform_cmd), 'plan']
+        result = self.run(
+            cmd, may_simulate=True, timeout=tf_timeout, stdout=PIPE, stderr=PIPE, check=True)
+        LOG.debug(_("Completed process:") + "\n" + str(result))
+
+        goto = Path(os.path.relpath(self.project_dir, self.start_dir))
+
+        print()
+        print()
+        print(self.colored(_("Congratulations!"), 'GREEN'))
+        print()
+        print(_("Now you are ready to deploy the following virtual machines:"))
+        for vm in sorted(self.vms, key=lambda x: x.tf_name):
+            print(" * {}".format(vm.fqdn))
+        print()
+        print(_("To start the deployment process change to directory {}").format(
+            self.colored(str(goto), 'GREEN')))
+        print()
+        print(_("and enter: {}").format(self.colored('terraform apply', 'GREEN')))
+        print()
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
diff --git a/lib/cr_tf/terraform/__init__.py b/lib/cr_tf/terraform/__init__.py
new file mode 100644 (file)
index 0000000..a687efa
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/env python3
+# -*- coding: utf-8 -*-
+
+__version__ = '1.0.0'
+
+# vim: ts=4 et list
diff --git a/lib/cr_tf/terraform/disk.py b/lib/cr_tf/terraform/disk.py
new file mode 100644 (file)
index 0000000..330836f
--- /dev/null
@@ -0,0 +1,432 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2018 by Frank Brehm, Berlin
+@summary: The module for a VM disk destined for Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import copy
+
+try:
+    from collections.abc import MutableMapping
+except ImportError:
+    from collections import MutableMapping
+
+# Third party modules
+
+# Own modules
+from fb_tools.obj import FbBaseObject
+
+from ..config import CrTfConfiguration
+
+from ..xlate import XLATOR
+
+__version__ = '1.2.2'
+
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformDisk(FbBaseObject):
+    """A class encapsulating a disk of a VirtualMachine managed by Terraform."""
+
+    default_size = CrTfConfiguration.default_disk_size
+
+    min_size_gb = CrTfConfiguration.default_disk_min_size
+    max_size_gb = CrTfConfiguration.default_disk_max_size
+
+    msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
+            root_disk=False, unit_number=0, size_gb=None):
+
+        self._root_disk = bool(root_disk)
+        self._unit_number = 0
+        self._size_gb = self.default_size
+
+        super(TerraformDisk, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            initialized=False,
+        )
+
+        self._set_unit_number(unit_number)
+        if size_gb is not None:
+            self.size_gb = size_gb
+
+        self.initialized = initialized
+
+    # -----------------------------------------------------------
+    @property
+    def root_disk(self):
+        """A flag indicating, that this is the root disk of a VM."""
+        return self._root_disk
+
+    @root_disk.setter
+    def root_disk(self, value):
+        self._root_disk = bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def unit_number(self):
+        """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
+        return self._unit_number
+
+    # -----------------------------------------------------------
+    @property
+    def size_gb(self):
+        """Size of the disk in GiB."""
+        return self._size_gb
+
+    @size_gb.setter
+    def size_gb(self, value):
+        val = float(value)
+        msg = _("Invalid disk size {n} - size must be {min} <= SIZE <= {max}.").format(
+            n=val, min=self.min_size_gb, max=self.max_size_gb)
+        if val < self.min_size_gb or val > self.max_size_gb:
+            raise ValueError(msg)
+        self._size_gb = val
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(TerraformDisk, self).as_dict(short=short)
+        res['default_size'] = self.default_size
+        res['max_size_gb'] = self.max_size_gb
+        res['min_size_gb'] = self.min_size_gb
+        res['root_disk'] = self.root_disk
+        res['size_gb'] = self.size_gb
+        res['unit_number'] = self.unit_number
+
+        return res
+
+    # -------------------------------------------------------------------------
+    def _set_unit_number(self, value):
+        val = int(value)
+        if self.root_disk:
+            self._unit_number = 0
+            if val != 0:
+                msg = _("A root disk must have always the unit number 0 (given {!r}).").format(
+                    value)
+                raise ValueError(msg)
+            return
+        msg = _("Invalid unit number {n} - number must be {min} <= NUMBER <= {max}.").format(
+            n=val, min=1, max=64)
+        if val < 1 or val > 64:
+            raise ValueError(msg)
+
+        self._unit_number = val
+
+    # -------------------------------------------------------------------------
+    def __copy__(self):
+
+        if self.verbose > 3:
+            LOG.debug(_("Copying Terraform disk object with unit ID {}.").format(self.unit_number))
+
+        disk = self.__class__(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            initialized=self.initialized, root_disk=self.root_disk, unit_number=self.unit_number,
+            size_gb=self.size_gb)
+
+        return disk
+
+    # -------------------------------------------------------------------------
+    def __eq__(self, other):
+
+        if not isinstance(other, TerraformDisk):
+            raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDisk'))
+
+        if self.unit_number != other.unit_number:
+            return False
+        if self.root_disk != other.root_disk:
+            return False
+        if self.size_gb != other.size_gb:
+            return False
+
+        return True
+
+
+# =============================================================================
+class TerraformDiskDict(MutableMapping, FbBaseObject):
+    """
+    A dictionary containing TerraformDisk objects.
+    It works like a dict keyed by the disk unit number, e.g.:
+        disks = TerraformDiskDict()
+        disks.append(TerraformDisk(unit_number=0, root_disk=True, size_gb=48, ...))
+    and disks[0] then returns the TerraformDisk object with unit_number 0.
+    """
+
+    msg_invalid_disk_type = _("Invalid disk type {{!r}} to set, only {} allowed.").format(
+        'TerraformDisk')
+    msg_key_not_unit_number = _("The key {k!r} must be equal to the unit_number of the disk {u}.")
+    msg_none_type_error = _("None type as key is not allowed.")
+    msg_empty_key_error = _("Empty key {!r} is not allowed.")
+    msg_no_disk_dict = _("Object {o!r} is not a {e} object.")
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
+            *disks):
+
+        self._map = dict()
+
+        super(TerraformDiskDict, self).__init__(
+            appname=appname, verbose=verbose, base_dir=base_dir, initialized=False)
+
+        for disk in disks:
+            self.append(disk)
+
+        if initialized:
+            self.initialized = True
+
+    # -------------------------------------------------------------------------
+    def _set_item(self, key, disk):
+
+        if not isinstance(disk, TerraformDisk):
+            raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
+
+        if disk.unit_number != key:
+            raise KeyError(self.msg_key_not_unit_number.format(k=key, u=disk.unit_number))
+
+        self._map[disk.unit_number] = disk
+
+    # -------------------------------------------------------------------------
+    def append(self, disk):
+
+        if not isinstance(disk, TerraformDisk):
+            raise TypeError(self.msg_invalid_disk_type.format(disk.__class__.__name__))
+
+        self._set_item(disk.unit_number, disk)
+
+    # -------------------------------------------------------------------------
+    def _get_item(self, key):
+
+        if key is None:
+            raise TypeError(self.msg_none_type_error)
+
+        unit_number = int(key)
+        return self._map[unit_number]
+
+    # -------------------------------------------------------------------------
+    def get(self, key):
+        return self._get_item(key)
+
+    # -------------------------------------------------------------------------
+    def _del_item(self, key, strict=True):
+
+        if key is None:
+            raise TypeError(self.msg_none_type_error)
+
+        unit_number = int(key)
+
+        if not strict and unit_number not in self._map:
+            return
+
+        del self._map[unit_number]
+
+    # -------------------------------------------------------------------------
+    # The next five methods are requirements of the ABC.
+    def __setitem__(self, key, value):
+        self._set_item(key, value)
+
+    # -------------------------------------------------------------------------
+    def __getitem__(self, key):
+        return self._get_item(key)
+
+    # -------------------------------------------------------------------------
+    def __delitem__(self, key):
+        self._del_item(key)
+
+    # -------------------------------------------------------------------------
+    def __iter__(self):
+
+        for unit_number in self.keys():
+            yield unit_number
+
+    # -------------------------------------------------------------------------
+    def __len__(self):
+        return len(self._map)
+
+    # -------------------------------------------------------------------------
+    # The next methods aren't required, but nice for different purposes:
+    def __str__(self):
+        '''returns simple dict representation of the mapping'''
+        return str(self._map)
+
+    # -------------------------------------------------------------------------
+    def __repr__(self):
+        '''echoes class, id, & reproducible representation in the REPL'''
+        return '{}, {}({})'.format(
+            super(TerraformDiskDict, self).__repr__(),
+            self.__class__.__name__,
+            self._map)
+
+    # -------------------------------------------------------------------------
+    def __contains__(self, key):
+
+        if key is None:
+            raise TypeError(self.msg_none_type_error)
+
+        unit_number = int(key)
+        return unit_number in self._map
+
+    # -------------------------------------------------------------------------
+    def keys(self):
+
+        return sorted(self._map.keys())
+
+    # -------------------------------------------------------------------------
+    def items(self):
+
+        item_list = []
+
+        for unit_number in self.keys():
+            item_list.append((unit_number, self._map[unit_number]))
+
+        return item_list
+
+    # -------------------------------------------------------------------------
+    def values(self):
+
+        value_list = []
+        for unit_number in self.keys():
+            value_list.append(self._map[unit_number])
+        return value_list
+
+    # -------------------------------------------------------------------------
+    def __eq__(self, other):
+
+        if not isinstance(other, TerraformDiskDict):
+            raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
+
+        return self._map == other._map
+
+    # -------------------------------------------------------------------------
+    def __ne__(self, other):
+
+        if not isinstance(other, TerraformDiskDict):
+            raise TypeError(self.msg_no_disk_dict.format(o=other, e='TerraformDiskDict'))
+
+        return self._map != other._map
+
+    # -------------------------------------------------------------------------
+    def pop(self, key, *args):
+
+        if key is None:
+            raise TypeError(self.msg_none_type_error)
+
+        unit_number = int(key)
+        return self._map.pop(unit_number, *args)
+
+    # -------------------------------------------------------------------------
+    def popitem(self):
+
+        if not len(self._map):
+            return None
+
+        unit_number = self.keys()[0]
+        disk = self._map[unit_number]
+        del self._map[unit_number]
+        return (unit_number, disk)
+
+    # -------------------------------------------------------------------------
+    def clear(self):
+        self._map = dict()
+
+    # -------------------------------------------------------------------------
+    def setdefault(self, key, default):
+
+        if key is None:
+            raise TypeError(self.msg_none_type_error)
+
+        unit_number = int(key)
+
+        if not isinstance(default, TerraformDisk):
+            raise TypeError(self.msg_invalid_disk_type.format(default.__class__.__name__))
+
+        if unit_number in self._map:
+            return self._map[unit_number]
+
+        self._set_item(unit_number, default)
+        return default
+
+    # -------------------------------------------------------------------------
+    def update(self, other):
+
+        if isinstance(other, TerraformDiskDict) or isinstance(other, dict):
+            for key in other.keys():
+                unit_number = int(key)
+                self._set_item(unit_number, other[key])
+            return
+
+        for tokens in other:
+            key = tokens[0]
+            value = tokens[1]
+            self._set_item(key, value)
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+
+        res = super(TerraformDiskDict, self).as_dict(short=short)
+        res['map'] = {}
+
+        for unit_number in self._map:
+            res['map'][unit_number] = self._map[unit_number].as_dict(short)
+
+        return res
+
+    # -------------------------------------------------------------------------
+    def as_list(self, short=True):
+
+        res = []
+        for unit_number in self.keys():
+            res.append(self._map[unit_number].as_dict(short))
+        return res
+
+    # -------------------------------------------------------------------------
+    def __copy__(self):
+
+        if self.verbose > 2:
+            LOG.debug(_("Copying Terraform disk dictionary ..."))
+
+        new = self.__class__(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            initialized=False)
+
+        for unit_number in self._map:
+            new.append(copy.copy(self._map[unit_number]))
+
+        if self.initialized:
+            new.initialized = True
+
+        return new
+
+
+# =============================================================================
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
diff --git a/lib/cr_tf/terraform/interface.py b/lib/cr_tf/terraform/interface.py
new file mode 100644 (file)
index 0000000..de598bd
--- /dev/null
@@ -0,0 +1,437 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2018 by Frank Brehm, Berlin
+@summary: The module for a VM interface destined for Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import re
+import ipaddress
+
+try:
+    from collections.abc import Mapping
+except ImportError:
+    from collections import Mapping
+
+# Third party modules
+
+# Own modules
+from fb_tools.common import pp, to_bool, RE_FQDN
+
+from fb_tools.obj import FbBaseObject
+
+from ..errors import TerraformVmDefinitionError
+
+from ..xlate import XLATOR
+
+__version__ = '1.0.1'
+LOG = logging.getLogger(__name__)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformInterface(FbBaseObject):
+    """A class encapsulating a network interface of a VirtualMachine managed by Terraform."""
+
+    re_address = re.compile(r'^\s*address\s*$', re.IGNORECASE)
+    re_address_v4 = re.compile(r'^\s*address[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
+    re_address_v6 = re.compile(r'^\s*address[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
+    re_fqdn = re.compile(r'^\s*fqdn\s*$', re.IGNORECASE)
+    re_gateway = re.compile(r'^\s*gateway\s*$', re.IGNORECASE)
+    re_gateway_v4 = re.compile(r'^\s*gateway[_-]?(?:ip)?v4\s*$', re.IGNORECASE)
+    re_gateway_v6 = re.compile(r'^\s*gateway[_-]?(?:ip)?v6\s*$', re.IGNORECASE)
+    re_v4_before_v6 = re.compile(
+        r'^\s*(?:ip)?v4[_-](?:before|primary[_-]to)[_-](?:ip)?v6\s*$', re.IGNORECASE)
+    re_network = re.compile(r'^\s*network\s*$', re.IGNORECASE)
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None, initialized=False,
+            address_v4=None, address_v6=None, fqdn=None, network=None, ipv4_primary=False,
+            gateway_v4=None, gateway_v6=None, netmask_v4=None, netmask_v6=None):
+
+        self._address_v4 = None
+        self._netmask_v4 = None
+        self._address_v6 = None
+        self._netmask_v6 = None
+        self._fqdn = None
+        self._network = None
+        self._gateway_v4 = None
+        self._gateway_v6 = None
+        self._ipv4_primary = bool(ipv4_primary)
+
+        super(TerraformInterface, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            initialized=False,
+        )
+
+        if address_v4 is not None:
+            self.address_v4 = address_v4
+        if address_v6 is not None:
+            self.address_v6 = address_v6
+        if fqdn is not None:
+            self.fqdn = fqdn
+        if network is not None:
+            self.network = network
+        if gateway_v4 is not None:
+            self.gateway_v4 = gateway_v4
+        if gateway_v6 is not None:
+            self.gateway_v6 = gateway_v6
+        if netmask_v4 is not None:
+            self.netmask_v4 = netmask_v4
+        if netmask_v6 is not None:
+            self.netmask_v6 = netmask_v6
+
+        self.initialized = initialized
+
+    # -----------------------------------------------------------
+    @property
+    def ipv4_primary(self):
+        """Is the IPv6 address prior to the IPv6 address, if both are existing?"""
+        return self._ipv4_primary
+
+    @ipv4_primary.setter
+    def ipv4_primary(self, value):
+        self._ipv4_primary = bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def address_v4(self):
+        """The IPv4 address of the interface."""
+        return self._address_v4
+
+    @address_v4.setter
+    def address_v4(self, value):
+        if value is None:
+            self._address_v4 = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._address_v4 = None
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version != 4:
+            msg = _("IP address {!r} is not an IPv4 address.").format(addr)
+            raise ValueError(msg)
+
+        self._address_v4 = addr
+
+    # -----------------------------------------------------------
+    @property
+    def address_v6(self):
+        """The IPv6 address of the interface."""
+        return self._address_v6
+
+    @address_v6.setter
+    def address_v6(self, value):
+        if value is None:
+            self._address_v6 = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._address_v6 = None
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version != 6:
+            msg = _("IP address {!r} is not an IPv6 address.").format(addr)
+            raise ValueError(msg)
+
+        self._address_v6 = addr
+
+    # -----------------------------------------------------------
+    @property
+    def address(self):
+        """The IPv4 or IPv6 address of the interface."""
+        if self.address_v4 and self.address_v6:
+            if self.ipv4_primary:
+                return self.address_v4
+            else:
+                return self.address_v6
+        if self.address_v4:
+            return self.address_v4
+        if self.address_v6:
+            return self.address_v6
+        return None
+
+    @address.setter
+    def address(self, value):
+        if value is None:
+            return
+        val = str(value).strip()
+        if val == '':
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version == 6:
+            self._address_v6 = addr
+        else:
+            self._address_v4 = addr
+
+    # -----------------------------------------------------------
+    @property
+    def fqdn(self):
+        """The FQDN of the interface address to define."""
+        return self._fqdn
+
+    @fqdn.setter
+    def fqdn(self, value):
+        if value is None:
+            self._fqdn = None
+            return
+
+        val = str(value).strip().lower()
+        if val == '':
+            self._fqdn = None
+            return
+
+        if not RE_FQDN.search(val):
+            msg = _("The hostname {!r} is no a valid FQDN.").format(value)
+            raise ValueError(msg)
+        self._fqdn = val
+
+    # -----------------------------------------------------------
+    @property
+    def network(self):
+        """The name of the VSphere network of the interface."""
+        return self._network
+
+    @network.setter
+    def network(self, value):
+        if value is None:
+            self._network = None
+            return
+
+        val = str(value).strip()
+        if val == '':
+            self._network = None
+            return
+
+        self._network = val
+
+    # -----------------------------------------------------------
+    @property
+    def gateway_v4(self):
+        """The IPv4 gateway of the interface."""
+        return self._gateway_v4
+
+    @gateway_v4.setter
+    def gateway_v4(self, value):
+        if value is None:
+            self._gateway_v4 = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._gateway_v4 = None
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version != 4:
+            msg = _("IP gateway {!r} is not an IPv4 address.").format(addr)
+            raise ValueError(msg)
+
+        self._gateway_v4 = addr
+
+    # -----------------------------------------------------------
+    @property
+    def gateway_v6(self):
+        """The IPv6 gateway of the interface."""
+        return self._gateway_v6
+
+    @gateway_v6.setter
+    def gateway_v6(self, value):
+        if value is None:
+            self._gateway_v6 = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._gateway_v6 = None
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version != 6:
+            msg = _("IP gateway {!r} is not an IPv6 address.").format(addr)
+            raise ValueError(msg)
+
+        self._gateway_v6 = addr
+
+    # -----------------------------------------------------------
+    @property
+    def netmask_v4(self):
+        """The IPv4 netmask of the interface."""
+        return self._netmask_v4
+
+    @netmask_v4.setter
+    def netmask_v4(self, value):
+        if value is None:
+            self._netmask_v4 = None
+            return
+        val = int(value)
+        if val < 0 or val > 32:
+            msg = _("Invalid IPv4 netmask {!r}").format(value)
+            raise ValueError(msg)
+
+        self._netmask_v4 = val
+
+    # -----------------------------------------------------------
+    @property
+    def netmask_v6(self):
+        """The IPv6 netmask of the interface."""
+        return self._netmask_v6
+
+    @netmask_v6.setter
+    def netmask_v6(self, value):
+        if value is None:
+            self._netmask_v6 = None
+            return
+        val = int(value)
+        if val < 0 or val > 128:
+            msg = _("Invalid IPv6 netmask {!r}").format(value)
+            raise ValueError(msg)
+
+        self._netmask_v6 = val
+
+    # -----------------------------------------------------------
+    @property
+    def gateway(self):
+        """The IPv4 or IPv6 gateway of the interface."""
+        if self.gateway_v4 and self.gateway_v6:
+            if self.ipv4_primary:
+                return self.gateway_v4
+            else:
+                return self.gateway_v6
+        if self.gateway_v4:
+            return self.gateway_v4
+        if self.gateway_v6:
+            return self.gateway_v6
+        return None
+
+    @gateway.setter
+    def gateway(self, value):
+        if value is None:
+            return
+        val = str(value).strip()
+        if val == '':
+            return
+
+        addr = ipaddress.ip_address(val)
+        if addr.version == 6:
+            self._gateway_v6 = addr
+        else:
+            self._gateway_v4 = addr
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(TerraformInterface, self).as_dict(short=short)
+        res['address'] = self.address
+        res['address_v4'] = self.address_v4
+        res['address_v6'] = self.address_v6
+        res['fqdn'] = self.fqdn
+        res['gateway'] = self.gateway
+        res['gateway_v4'] = self.gateway_v4
+        res['gateway_v6'] = self.gateway_v6
+        res['ipv4_primary'] = self.ipv4_primary
+        res['netmask_v4'] = self.netmask_v4
+        res['netmask_v6'] = self.netmask_v6
+        res['network'] = self.network
+
+        return res
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def from_def(cls, if_def, appname=None, verbose=0, base_dir=None):
+
+        if verbose > 2:
+            LOG.debug(
+                _("Trying to instantiate terraform interface from data:") + "\n" + pp(if_def))
+
+        if not isinstance(if_def, Mapping):
+            msg = _("Interface definition is not a dictionary:") + "\n" + pp(if_def)
+            raise TerraformVmDefinitionError(msg)
+
+        interface = cls(appname=appname, verbose=verbose, base_dir=base_dir)
+        interface.initialized = False
+
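+        # Match each definition key against the class-level key regexes;
+        # keys matching none of them are silently ignored.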
+        for key in if_def.keys():
+
+            val = if_def[key]
+
+            if verbose > 3:
+                LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=val))
+
+            if cls.re_address.search(key) and val:
+                interface.address = val
+                continue
+            if cls.re_address_v4.search(key):
+                interface.address_v4 = val
+                continue
+            if cls.re_address_v6.search(key):
+                interface.address_v6 = val
+                continue
+            if cls.re_v4_before_v6.search(key):
+                interface.ipv4_primary = to_bool(val)
+                continue
+            if cls.re_fqdn.search(key):
+                interface.fqdn = val
+                continue
+            if cls.re_network.search(key):
+                interface.network = val
+                continue
+            if cls.re_gateway.search(key) and val:
+                interface.gateway = val
+                continue
+            if cls.re_gateway_v4.search(key):
+                interface.gateway_v4 = val
+                continue
+            if cls.re_gateway_v6.search(key):
+                interface.gateway_v6 = val
+                continue
+
+        interface.initialized = True
+        return interface
+
+    # -------------------------------------------------------------------------
+    def __copy__(self):
+
+        if self.verbose > 2:
+            LOG.debug(_("Copying Terraform interface object with address {}.").format(
+                self.address))
+
+        interface = self.__class__(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            initialized=self.initialized, address_v4=self.address_v4, address_v6=self.address_v6,
+            ipv4_primary=self.ipv4_primary, fqdn=self.fqdn, network=self.network,
+            gateway_v4=self.gateway_v4, gateway_v6=self.gateway_v6
+        )
+
+        return interface
+
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
diff --git a/lib/cr_tf/terraform/vm.py b/lib/cr_tf/terraform/vm.py
new file mode 100644 (file)
index 0000000..fa79142
--- /dev/null
@@ -0,0 +1,1209 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2018 by Frank Brehm, Berlin
+@summary: The module for a VM destined to be managed by Terraform
+"""
+from __future__ import absolute_import
+
+# Standard modules
+import logging
+import re
+import copy
+import ipaddress
+
+try:
+    from collections.abc import Iterable, Mapping
+except ImportError:
+    from collections import Iterable, Mapping
+
+# Third party modules
+
+# Own modules
+from fb_tools.common import pp, to_bool, RE_FQDN, RE_TF_NAME
+from fb_tools.common import human2mbytes, is_sequence
+
+from fb_tools.handling_obj import HandlingObject
+
+from ..errors import TerraformVmDefinitionError
+
+from ..config import CrTfConfiguration
+
+from ..xlate import XLATOR
+
+from .disk import TerraformDisk, TerraformDiskDict
+
+from .interface import TerraformInterface
+
+__version__ = '1.3.1'
+
+LOG = logging.getLogger(__name__)
+
+PUPPET_TIERS = (
+    'production',
+    'live',
+    'test',
+    'stage',
+    'development',
+)
+
+PUPPET_ENVIRONMENTS = (
+    'production',
+    'test',
+    'development',
+    'dev_chronie'
+)
+
+DS_TYPES = (
+    'ssd',
+    'sas',
+    'sata',
+)
+
+_ = XLATOR.gettext
+ngettext = XLATOR.ngettext
+
+
+# =============================================================================
+class TerraformVm(HandlingObject):
+    """A class encapsulating a VirtualMachine managed by Terraform."""
+
+    default_boot_delay = 5
+    default_customer = 'Pixelpark'
+    default_ds_type = 'sata'
+    default_folder = 'pixelpark'
+    default_memory = 1024
+    default_nameservers = (
+        ipaddress.ip_address('93.188.109.13'),
+        ipaddress.ip_address('217.66.52.10'),
+        ipaddress.ip_address('212.91.225.75')
+    )
+    default_num_cpus = 1
+    default_puppet_contact = '8x5@pixelpark.com'
+    default_puppet_customer = 'pixelpark'
+    default_puppet_env = 'development'
+    default_puppet_tier = 'development'
+    default_puppet_role = 'base_oel7'
+    default_purpose = "Customer project"
+    default_rootdisk_size = 32.0
+
+    max_num_cpus = 64
+    memory_chunk = 256
+    max_memory = 512 * 1024
+    max_boot_delay = 30
+    min_rootdisk_size = CrTfConfiguration.default_root_min_size
+    max_rootdisk_size = CrTfConfiguration.default_root_max_size
+
+    re_key_fqdn = re.compile(r'^\s*(?:fqdn|name)\s*$', re.IGNORECASE)
+    re_key_vm_folder = re.compile(r'^\s*(?:vm[_-]?)folder\s*$', re.IGNORECASE)
+    re_key_boot_delay = re.compile(r'^\s*boot[_-]?delay\s*$', re.IGNORECASE)
+    re_key_ds_cluster = re.compile(r'^\s*(?:datastore|ds)[_-]?cluster\s*$', re.IGNORECASE)
+    re_key_ds_type = re.compile(r'^\s*(?:datastore|ds)[_-]?type\s*$', re.IGNORECASE)
+    re_key_puppet_contact = re.compile(r'^\s*puppet[_-]?contact\s*$', re.IGNORECASE)
+    re_key_puppet_customer = re.compile(r'^\s*(?:puppet|hiera)[_-]?customer\s*$', re.IGNORECASE)
+    re_key_puppet_project = re.compile(r'^\s*(?:puppet|hiera)[_-]?project\s*$', re.IGNORECASE)
+    re_key_puppet_tier = re.compile(r'^\s*puppet[_-]?tier\s*$', re.IGNORECASE)
+    re_key_puppet_env = re.compile(r'^\s*puppet[_-]?env(?:ironment)?\s*$', re.IGNORECASE)
+    re_key_puppet_role = re.compile(r'^\s*puppet[_-]?role\s*$', re.IGNORECASE)
+    re_key_env = re.compile(r'^\s*env(?:ironment)?\s*$', re.IGNORECASE)
+    re_key_ns = re.compile(r'^\s*nameservers?\s*$', re.IGNORECASE)
+    re_key_root_disk = re.compile(r'^\s*root[_-]?disk\s*$', re.IGNORECASE)
+    re_key_root_disk_size = re.compile(r'^\s*root[_-]?disk[_-]?size\s*$', re.IGNORECASE)
+    re_key_data_disk = re.compile(r'^\s*data[_-]?disk\s*$', re.IGNORECASE)
+    re_key_data_disks = re.compile(r'^\s*data[_-]?disks\s*$', re.IGNORECASE)
+    re_key_interface = re.compile(r'^\s*interfaces?\s*$', re.IGNORECASE)
+    re_key_has_backup = re.compile(r'^\s*has[_-]?backup\s*$', re.IGNORECASE)
+    re_key_has_puppet = re.compile(r'^\s*has[_-]?puppet\s*$', re.IGNORECASE)
+    re_memory_value = re.compile(r'^\s*(\d+(?:\.\d*)?)\s*(?:(\D+)\s*)?$')
+
+    re_invalid_chars = re.compile(r'[^a-z0-9@\._-]', re.IGNORECASE)
+
+    re_disk_size = re.compile(r'^\s*size\s*$', re.IGNORECASE)
+    re_disk_mountpoint = re.compile(r'^\s*mount[_-]?point\s*$', re.IGNORECASE)
+    re_disk_vgname = re.compile(r'^\s*vg[_-]?name\s*$', re.IGNORECASE)
+    re_disk_lvname = re.compile(r'^\s*lv[_-]?name\s*$', re.IGNORECASE)
+    re_disk_fstype = re.compile(r'^\s*fs[_-]?type\s*$', re.IGNORECASE)
+
+    re_fqdn_dot_at_end = re.compile(r'[^\.]\.$')
+
+    # -------------------------------------------------------------------------
+    def __init__(
+        self, appname=None, verbose=0, version=__version__, base_dir=None,
+            simulate=False, force=None, terminal_has_colors=False, initialized=False,
+            is_template=True, name=None, fqdn=None, folder=None, num_cpus=None, memory=None,
+            cluster=None, boot_delay=None, ds_cluster=None, datastore=None, ds_type=None,
+            customer=None, rootdisk_size=None, purpose=None, puppet_contact=None, puppet_role=None,
+            puppet_customer=None, puppet_project=None, puppet_tier=None, puppet_env=None,
+            vm_template=None, nameservers=None, has_backup=True, has_puppet=True,
+            already_existing=None):
+
+        self._is_template = bool(is_template)
+        self._name = None
+        self._fqdn = None
+        self._cluster = None
+        self._folder = self.default_folder
+        self._num_cpus = self.default_num_cpus
+        self._memory = self.default_memory
+        self._boot_delay = self.default_boot_delay
+        self._ds_cluster = None
+        self._datastore = None
+        self._ds_type = self.default_ds_type
+        self._customer = self.default_customer
+        self._rootdisk_size = self.default_rootdisk_size
+        self._purpose = self.default_purpose
+        self._puppet_contact = self.default_puppet_contact
+        self._puppet_customer = self.default_puppet_customer
+        self._puppet_project = self.default_puppet_customer
+        self._puppet_tier = self.default_puppet_tier
+        self._puppet_env = None
+        self._puppet_role = self.default_puppet_role
+        self._vm_template = CrTfConfiguration.default_template_name
+        self._has_backup = bool(has_backup)
+        self._has_puppet = bool(has_puppet)
+        self._already_existing = False
+
+        self.disks = None
+        self.interfaces = []
+
+        self.nameservers = copy.copy(self.default_nameservers)
+
+        super(TerraformVm, self).__init__(
+            appname=appname, verbose=verbose, version=version, base_dir=base_dir,
+            simulate=simulate, force=force, terminal_has_colors=terminal_has_colors,
+            initialized=False,
+        )
+
+        self._post_init(
+            name=name, fqdn=fqdn, num_cpus=num_cpus, memory=memory, folder=folder,
+            boot_delay=boot_delay, vm_template=vm_template, puppet_contact=puppet_contact,
+            puppet_customer=puppet_customer, puppet_tier=puppet_tier, puppet_env=puppet_env,
+            cluster=cluster, rootdisk_size=rootdisk_size, nameservers=nameservers, purpose=purpose,
+            customer=customer, ds_cluster=ds_cluster, datastore=datastore, ds_type=ds_type,
+            already_existing=already_existing, initialized=initialized, puppet_role=puppet_role,
+            puppet_project=puppet_project,)
+
+    # -------------------------------------------------------------------------
+    def _post_init(self, name=None, fqdn=None, nameservers=None, initialized=False, **kwargs):
+
+        self.disks = TerraformDiskDict(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir)
+
+        if name and str(name).strip():
+            self._name = str(name).strip()
+
+        if not self.is_template and fqdn is not None:
+            self.fqdn = fqdn
+
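+        # Any remaining keyword argument with a non-None value is applied
+        # through the corresponding property setter, if such an attribute exists.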
+        for (key, val) in kwargs.items():
+            if val is None:
+                continue
+            if hasattr(self, str(key)):
+                setattr(self, str(key), val)
+
+        if nameservers is not None:
+            self.nameservers = self._get_ns_list(nameservers)
+
+        if self.is_template:
+            if self.fqdn:
+                msg = _("A VM template definition may not have a FQDN (found: {!r}).").format(
+                    self.fqdn)
+                raise TerraformVmDefinitionError(msg)
+            if not self.name:
+                msg = _("A VM template definition must have a name.")
+                raise TerraformVmDefinitionError(msg)
+        else:
+            if not self.fqdn:
+                msg = _("A VM definition (no template) must have a FQDN.")
+                raise TerraformVmDefinitionError(msg)
+
+        self.apply_root_disk()
+
+        self.initialized = initialized
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def from_def(
+        cls, vm_def, name=None, is_template=False, template_vm=None, appname=None,
+            verbose=0, base_dir=None, simulate=False, force=False, default_cluster='',
+            terminal_has_colors=False, initialized=False):
+
+        if verbose > 2:
+            LOG.debug(_("Trying to instantiate VM from data:") + "\n" + pp(vm_def))
+
+        if not isinstance(vm_def, Mapping):
+            msg = _("VM definition is not a dictionary:") + "\n" + pp(vm_def)
+            raise TerraformVmDefinitionError(msg)
+
+        if template_vm:
+            if not isinstance(template_vm, TerraformVm):
+                msg = _("Given parameter {!r} is not a TerraformVm object.").format(template_vm)
+                raise TypeError(msg)
+            vm = copy.copy(template_vm)
+            vm.appname = appname
+            vm.verbose = verbose
+            vm.base_dir = base_dir
+            vm.simulate = simulate
+            vm.force = force
+            vm.terminal_has_colors = terminal_has_colors
+        else:
+            vm = cls(
+                appname=appname, verbose=verbose, base_dir=base_dir, simulate=simulate,
+                force=force, is_template=is_template, name=name, cluster=default_cluster,
+                terminal_has_colors=terminal_has_colors)
+        vm.initialized = False
+
+        vm.is_template = is_template
+        vm.name = name
+
+        for (key, value) in vm_def.items():
+            cls._apply_vmdef2vm(
+                vm, key, value, verbose=verbose, appname=appname, base_dir=base_dir)
+
+        vm.apply_root_disk()
+        if vm.interfaces and vm.fqdn and not vm.interfaces[0].fqdn:
+            vm.interfaces[0].fqdn = vm.fqdn
+
+        vm.initialized = True
+        return vm
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def _apply_vmdef2vm(cls, vm, key, value, verbose=0, appname=None, base_dir=None):
+
+        if verbose > 3:
+            LOG.debug(_("Evaluating key {k!r}: {v}").format(k=key, v=value))
+
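+        # Hand the key to the specialised helpers first; the first helper
+        # that recognises it applies the value and ends the evaluation.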
+        if cls._apply_general_vmdef2vm(vm, key, value, verbose):
+            return
+
+        if key.lower() == 'customer' and value.strip():
+            vm.customer = value.strip()
+            return
+
+        if key.lower() == 'purpose' and value:
+            vm.purpose = value.strip()
+            return
+
+        if key.lower() == 'template' and value:
+            vm.vm_template = value
+            return
+
+        if cls.re_key_has_backup.search(key):
+            vm.has_backup = to_bool(value)
+            return
+
+        if cls._apply_puppet_vmdef2vm(vm, key, value, verbose):
+            return
+
+        if cls._apply_disk_vmdef2vm(vm, key, value, verbose):
+            return
+
+        if cls.re_key_ns.search(key):
+            if isinstance(value, Iterable):
+                ns = cls._get_ns_list(value)
+                if ns:
+                    vm.nameservers = ns
+            elif value is None:
+                vm.nameservers = []
+            else:
+                LOG.error(_("Could not evaluate nameservers from {!r}.").format(value))
+            return
+
+        if cls.re_key_interface.search(key):
+            if vm.is_template:
+                LOG.error(_("Template definitions may not have interface definitions."))
+                return
+            if isinstance(value, Iterable):
+                for if_def in value:
+                    interface = TerraformInterface.from_def(
+                        if_def, appname=appname, verbose=verbose, base_dir=base_dir)
+                    vm.interfaces.append(interface)
+            else:
+                LOG.error(_("Could not evaluate interfaces from {!r}.").format(value))
+            return
+
+        LOG.debug(_("Unknown VM definition key {k!r} with value: {v!r}.").format(
+            k=key, v=value))
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def _apply_general_vmdef2vm(cls, vm, key, value, verbose=0):
+
+        if not vm.is_template and cls.re_key_fqdn.search(key):
+            vm.fqdn = value
+            return True
+
+        if key.lower() == 'cluster':
+            vm.cluster = value
+            return True
+
+        if key.lower() == 'num_cpus':
+            vm.num_cpus = value
+            return True
+
+        if key.lower() == 'memory':
+            vm.memory = value
+            return True
+
+        if cls.re_key_vm_folder.search(key) and value:
+            vm.folder = value
+            return True
+
+        if cls.re_key_boot_delay.search(key) and value:
+            vm.boot_delay = value
+            return True
+
+        if cls.re_key_ds_cluster.search(key) and value:
+            vm.ds_cluster = value
+            return True
+
+        if key.lower() == 'datastore' and value:
+            vm.datastore = value
+            return True
+
+        if cls.re_key_ds_type.search(key) and value:
+            vm.ds_type = value
+            return True
+
+        return False
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def _apply_disk_vmdef2vm(cls, vm, key, value, verbose=0):
+
+        if cls.re_key_root_disk_size.search(key):
+            vm.rootdisk_size = value
+            return True
+
+        if cls.re_key_root_disk.search(key):
+            if isinstance(value, Mapping):
+                for (p_key, p_val) in value.items():
+                    if p_key.lower() == 'size':
+                        vm.rootdisk_size = p_val
+            else:
+                LOG.error(_(
+                    "Could not evaluate size of root disk, {!r} is not a dictionary.").format(
+                    value))
+            return True
+
+        if cls.re_key_data_disk.search(key):
+            if isinstance(value, Mapping):
+                vm._add_data_disk(value)
+            elif value is None:
+                if 1 in vm.disks:
+                    del vm.disks[1]
+            else:
+                LOG.error(_("Could not evaluate data disk from {!r}.").format(value))
+            return True
+
+        if cls.re_key_data_disks.search(key):
+            if is_sequence(value):
+                unit_number = 1
+                if 1 in vm.disks:
+                    unit_number = 2
+                for disk_data in value:
+                    vm._add_data_disk(disk_data, unit_number)
+                    unit_number += 1
+            elif value is None:
+                if verbose > 1:
+                    LOG.debug(_("Data disks for VM {!r} were set to None.").format(vm.name))
+            else:
+                LOG.error(_("Could not evaluate data disks from {!r}.").format(value))
+            return True
+
+        return False
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def _apply_puppet_vmdef2vm(cls, vm, key, value, verbose=0):
+
+        if key.lower() == 'puppet' and isinstance(value, Mapping):
+            for (p_key, p_value) in value.items():
+
+                p_value_stripped = p_value.strip()
+                if verbose > 2:
+                    LOG.debug(_("Evaluating sub key of {d!r}: {k!r} => {v!r}").format(
+                        d='puppet', k=p_key, v=p_value_stripped))
+
+                if p_key.lower() == 'contact' and p_value_stripped:
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet contact name {!r}.").format(p_value))
+                    else:
+                        vm.puppet_contact = p_value_stripped
+                    continue
+
+                if p_key.lower() == 'customer' and p_value_stripped:
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet customer name {!r}.").format(p_value))
+                    else:
+                        vm.puppet_customer = p_value_stripped
+                    continue
+
+                if p_key.lower() == 'project' and p_value_stripped:
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet customer project name {!r}.").format(p_value))
+                    else:
+                        vm.puppet_project = p_value_stripped
+                    continue
+
+                if p_key.lower() == 'role' and p_value_stripped:
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet role {!r}.").format(p_value))
+                    else:
+                        vm.puppet_role = p_value_stripped
+                    continue
+
+                if p_key.lower() == 'tier' and p_value_stripped:
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet tier {!r}.").format(p_value))
+                    else:
+                        vm.puppet_tier = p_value_stripped
+                    continue
+
+                if cls.re_key_env.search(p_key) and p_value_stripped:
+                    if verbose > 2:
+                        LOG.debug(_("Setting Puppet environment to {!r}.").format(p_value_stripped))
+                    if cls.re_invalid_chars.search(p_value_stripped):
+                        LOG.error(_("Invalid puppet environment {!r}.").format(p_value))
+                    else:
+                        vm.puppet_env = p_value_stripped
+
+            return True
+
+        if cls.re_key_has_puppet.search(key):
+            vm.has_puppet = to_bool(value)
+            return True
+
+        if not hasattr(value, 'strip'):
+            if verbose > 3:
+                LOG.debug(_("Key {k!r} has no string value, but a {c!r} instead.").format(
+                    k=key, c=value.__class__.__name__))
+            return False
+
+        val_stripped = value.strip()
+
+        if cls.re_key_puppet_contact.search(key) and val_stripped:
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid contact name {!r}.").format(value))
+            else:
+                vm.puppet_contact = val_stripped
+            return True
+
+        if cls.re_key_puppet_customer.search(key) and val_stripped:
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid puppet customer name {!r}.").format(value))
+            else:
+                vm.puppet_customer = val_stripped
+            return True
+
+        if cls.re_key_puppet_project.search(key) and val_stripped:
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid puppet customer project name {!r}.").format(value))
+            else:
+                vm.puppet_project = val_stripped
+            return True
+
+        if cls.re_key_puppet_role.search(key) and val_stripped:
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid puppet role {!r}.").format(value))
+            else:
+                vm.puppet_role = val_stripped
+            return True
+
+        if cls.re_key_puppet_tier.search(key) and val_stripped:
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid puppet tier {!r}.").format(value))
+            else:
+                vm.puppet_tier = val_stripped
+            return True
+
+        if cls.re_key_puppet_env.search(key) and val_stripped:
+            if verbose > 2:
+                LOG.debug(_("Setting Puppet environment to {!r}.").format(p_value_stripped))
+            if cls.re_invalid_chars.search(val_stripped):
+                LOG.error(_("Invalid puppet environment {!r}.").format(value))
+            else:
+                vm.puppet_env = val_stripped
+            return True
+
+        return False
+
+    # -------------------------------------------------------------------------
+    def __copy__(self):
+
+        if self.verbose > 2:
+            n = self.name
+            if self.is_template:
+                tpl = _('Template')
+                if n is None:
+                    n = tpl
+                else:
+                    n += ' (' + tpl + ')'
+            LOG.debug(_("Copying Terraform VM object {!r} ...").format(n))
+
+        vm = self.__class__(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            simulate=self.simulate, force=self.force, initialized=self.initialized,
+            terminal_has_colors=self.terminal_has_colors, name=self.name,
+            is_template=self.is_template, fqdn=self.fqdn, folder=self.folder,
+            num_cpus=self.num_cpus, memory=self.memory, boot_delay=self.boot_delay,
+            cluster=self.cluster, ds_cluster=self.ds_cluster, datastore=self.datastore,
+            ds_type=self.ds_type, customer=self.customer, purpose=self.purpose,
+            vm_template=self.vm_template, puppet_contact=self.puppet_contact,
+            puppet_customer=self.puppet_customer, puppet_tier=self.puppet_tier,
+            puppet_env=self.puppet_env, puppet_role=self.puppet_role, nameservers=self.nameservers,
+            rootdisk_size=self.rootdisk_size, has_backup=self.has_backup,
+            has_puppet=self.has_puppet, puppet_project=self.puppet_project,
+        )
+
+        vm.disks = copy.copy(self.disks)
+
+        vm.interfaces = []
+        for interface in self.interfaces:
+            vm.interfaces.append(copy.copy(interface))
+
+        return vm
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def _get_ns_list(cls, nameservers):
+
+        if not isinstance(nameservers, Iterable):
+            raise ValueError(_("Parameter nameservers {!r} is not iterable.").format(nameservers))
+
+        ns = []
+        i = 1
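+        # Accept at most three unique nameserver addresses; additional
+        # entries only produce a warning.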
+        for val in nameservers:
+            try:
+                address = ipaddress.ip_address(val)
+                if i > 3:
+                    LOG.warn(_(
+                        "There are at most three nameservers accepted, {} "
+                        "will not be considered.").format(address))
+                elif address not in ns:
+                    ns.append(address)
+                    i += 1
+            except Exception as e:
+                LOG.error(_("Invalid nameserver address {v!r}: {e}").format(
+                    v=val, e=e))
+
+        return ns
+
+    # -----------------------------------------------------------
+    @property
+    def is_template(self):
+        """A flag indicating, that this is a template instance."""
+        return self._is_template
+
+    @is_template.setter
+    def is_template(self, value):
+        self._is_template = bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def has_backup(self):
+        """A flag indicating, that the VM should run the backup client."""
+        return self._has_backup
+
+    @has_backup.setter
+    def has_backup(self, value):
+        self._has_backup = bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def has_puppet(self):
+        """A flag indicating, that the VM should ishould be managed by puppet."""
+        return self._has_puppet
+
+    @has_puppet.setter
+    def has_puppet(self, value):
+        self._has_puppet = bool(value)
+
+    # -----------------------------------------------------------
+    @property
+    def fqdn(self):
+        """The FQDN of the VM to define. May be Non in case of template instances."""
+        return self._fqdn
+
+    @fqdn.setter
+    def fqdn(self, value):
+        if value is None:
+            self._fqdn = None
+            return
+
+        val = str(value).strip().lower()
+        if val == '':
+            self._fqdn = None
+            return
+
+        if not RE_FQDN.search(val):
+            msg = _("The hostname {!r} is no a valid FQDN.").format(value)
+            raise ValueError(msg)
+
+        if self.re_fqdn_dot_at_end.search(val):
+            msg = _("The hostname {!r} may not end with a dot '.'.").format(value)
+            raise ValueError(msg)
+
+        self._fqdn = val
+
+    # -----------------------------------------------------------
+    @property
+    def name(self):
+        """The name of the VM - if it is no template, then the FQDN."""
+        if self.is_template:
+            return self._name
+        return self._fqdn
+
+    @name.setter
+    def name(self, value):
+        if value is None:
+            if not self.is_template:
+                self._name = None
+                return
+            msg = _("The name of a template VM may not be None.")
+            raise TerraformVmDefinitionError(msg)
+
+        val = str(value).strip()
+        if val == '':
+            if not self.is_template:
+                self._name = None
+                return
+            msg = _("The name of a template VM may not be empty.")
+            raise TerraformVmDefinitionError(msg)
+
+        self._name = val
+
+    # -----------------------------------------------------------
+    @property
+    def tf_name(self):
+        """The name of the VM how used in terraform."""
+        if self.name is None:
+            return None
+        return 'vm_' + RE_TF_NAME.sub('_', self.name.lower())
+
+    # -----------------------------------------------------------
+    @property
+    def hostname(self):
+        """The base hostname of the VM (without domain)."""
+        if self._fqdn is None:
+            return None
+        return self._fqdn.split('.')[0]
+
+    # -----------------------------------------------------------
+    @property
+    def domain(self):
+        """The domain part of the host FQDN."""
+        if self._fqdn is None:
+            return None
+        return '.'.join(self._fqdn.split('.')[1:])
+
+    # -----------------------------------------------------------
+    @property
+    def num_cpus(self):
+        """Number of CPUs of the VM (num_cores_per_socket is always 1)."""
+        return self._num_cpus
+
+    @num_cpus.setter
+    def num_cpus(self, value):
+        val = int(value)
+        if val < 1 or val > self.max_num_cpus:
+            msg = _(
+                "Invalid number of CPUs {n} - number must be "
+                "{min} <= NUMBER <= {max}.").format(n=val, min=1, max=self.max_num_cpus)
+            raise ValueError(msg)
+        self._num_cpus = val
+
+    # -----------------------------------------------------------
+    @property
+    def memory(self):
+        """Memory of the VM in MiBytes, must be a multiple of 256."""
+        return self._memory
+
+    @memory.setter
+    def memory(self, value):
+
+        value = str(value)
+        if self.verbose > 2:
+            LOG.debug(_("Trying to detect memory from value {!r}.").format(value))
+
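+        # A bare number is interpreted as MiB; a value with a unit
+        # (e.g. '4 GiB') is converted to MiB by human2mbytes().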
+        match = self.re_memory_value.search(value)
+        if not match:
+            raise ValueError(_("Invalid memory {!r}.").format(value))
+        val_raw = match.group(1)
+        unit = match.group(2)
+        if unit:
+            val_raw = "{v} {u}".format(v=val_raw, u=unit)
+        else:
+            val_raw += ' MiB'
+
+        val = human2mbytes(val_raw)
+        if val < self.memory_chunk or val > self.max_memory:
+            msg = _("Invalid memory {m} - memory must be {min} <= MiBytes <= {max}.").format(
+                m=val, min=self.memory_chunk, max=self.max_memory)
+            raise ValueError(msg)
+        modulus = val % self.memory_chunk
+        if modulus:
+            msg = _("Invalid memory {m}, must be a multipe of {c}.").format(
+                m=val, c=self.memory_chunk)
+            raise ValueError(msg)
+        self._memory = val
+
+    # -----------------------------------------------------------
+    @property
+    def cluster(self):
+        """The name of the computing cluster, where the VM should be instantiated."""
+        return self._cluster
+
+    @cluster.setter
+    def cluster(self, value):
+        if value is None:
+            msg = _("The name of the computing cluster of the VM may not be None.")
+            raise TerraformVmDefinitionError(msg)
+
+        val = str(value).strip()
+        if val == '':
+            msg = _("The name of the computing cluster of the VM may not be empty.")
+            raise TerraformVmDefinitionError(msg)
+
+        self._cluster = val
+
+    # -----------------------------------------------------------
+    @property
+    def folder(self):
+        """The VM folder of the VM in VSphere."""
+        return self._folder
+
+    @folder.setter
+    def folder(self, value):
+        if value is None:
+            LOG.warn(_("A folder name may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A folder name may not be empty."))
+            return
+        self._folder = val
+
+    # -----------------------------------------------------------
+    @property
+    def boot_delay(self):
+        """Boot delay in seconds of the VM."""
+        return self._boot_delay
+
+    @boot_delay.setter
+    def boot_delay(self, value):
+        val = float(value)
+        if val < 0 or val > self.max_boot_delay:
+            msg = _(
+                "Invalid boot delay {b:0.1} - delay must be "
+                "{min} <= NUMBER <= {max}.").format(b=val, min=0, max=self.max_boot_delay)
+            raise ValueError(msg)
+        self._boot_delay = val
+
+    # -----------------------------------------------------------
+    @property
+    def ds_cluster(self):
+        """An optional defined datastore cluster."""
+        return self._ds_cluster
+
+    @ds_cluster.setter
+    def ds_cluster(self, value):
+        if value is None:
+            self._ds_cluster = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._ds_cluster = None
+            return
+        self._ds_cluster = val
+
+    # -----------------------------------------------------------
+    @property
+    def datastore(self):
+        """An optional defined datastore."""
+        return self._datastore
+
+    @datastore.setter
+    def datastore(self, value):
+        if value is None:
+            self._datastore = None
+            return
+        val = str(value).strip()
+        if val == '':
+            self._datastore = None
+            return
+        self._datastore = val
+
+    # -----------------------------------------------------------
+    @property
+    def ds_type(self):
+        """The type of the datastore (SATA,SAS or SSD).
+            Used for autoexpolring."""
+        return self._ds_type
+
+    @ds_type.setter
+    def ds_type(self, value):
+        if value is None:
+            self._ds_type = None
+            return
+        val = str(value).strip().lower()
+        if val == '':
+            self._ds_type = None
+            return
+        if val not in DS_TYPES:
+            msg = _("Datastore type {t!r} not allowed, valid datastore types are: {li}").format(
+                t=value, li=DS_TYPES)
+            raise ValueError(msg)
+        self._ds_type = val
+
+    # -----------------------------------------------------------
+    @property
+    def customer(self):
+        """The customer of the VM in VSphere."""
+        return self._customer
+
+    @customer.setter
+    def customer(self, value):
+        if value is None:
+            LOG.warn(_("A customer name may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A customer name may not be empty."))
+            return
+        self._customer = val
+
+    # -----------------------------------------------------------
+    @property
+    def owner(self):
+        """The customer of the VM in VSphere for /etc/motd."""
+        return self._customer
+
+    # -----------------------------------------------------------
+    @property
+    def purpose(self):
+        """The purpose of the VM in VSphere."""
+        return self._purpose
+
+    @purpose.setter
+    def purpose(self, value):
+        if value is None:
+            LOG.warn(_("A purpose may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A purpose may not be empty."))
+            return
+        self._purpose = val
+
+    # -----------------------------------------------------------
+    @property
+    def vm_template(self):
+        """The name of the VM or template in VSphere to use as template."""
+        return self._vm_template
+
+    @vm_template.setter
+    def vm_template(self, value):
+        if value is None:
+            LOG.warn(_("A template VM name may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A template VM name may not be empty."))
+            return
+        self._vm_template = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_contact(self):
+        """The name or address of the contact for the VM."""
+        return self._puppet_contact
+
+    @puppet_contact.setter
+    def puppet_contact(self, value):
+        if value is None:
+            LOG.warn(_("A puppet contact name may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A puppet contact name may not be empty."))
+            return
+        self._puppet_contact = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_customer(self):
+        """The name of the puppet hiera customer for the VM."""
+        return self._puppet_customer
+
+    @puppet_customer.setter
+    def puppet_customer(self, value):
+        if value is None:
+            LOG.warn(_("A puppet hiera customer name may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A puppet hiera customer name may not be empty."))
+            return
+        if '/' in val:
+            LOG.error(_("A puppet hiera customer name may not contain a slash (/) character."))
+            return
+        self._puppet_customer = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_project(self):
+        """The name of the puppet customer project for the VM."""
+        return self._puppet_project
+
+    @puppet_project.setter
+    def puppet_project(self, value):
+        if value is None:
+            LOG.warn(_("A puppet hiera project name may not be None."))
+            return
+
+        val = str(value).strip().lower()
+        if val == '':
+            LOG.warn(_("A puppet hiera customer project may not be empty."))
+            return
+        if '/' in val:
+            LOG.error(_("A puppet hiera customer project may not contain a slash (/) character."))
+            return
+        self._puppet_project = val
+
+    # -----------------------------------------------------------
+    @property
+    def hiera_customer(self):
+        """The name of the hiera customer for the VM."""
+        return self._puppet_customer
+
+    @hiera_customer.setter
+    def hiera_customer(self, value):
+        self.puppet_customer = value
+
+    # -----------------------------------------------------------
+    @property
+    def hiera_project(self):
+        """The name of the customer project for the VM."""
+        return self._puppet_project
+
+    @hiera_project.setter
+    def hiera_project(self, value):
+        self.puppet_project = value
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_role(self):
+        """The name of the puppet role for the VM."""
+        return self._puppet_role
+
+    @puppet_role.setter
+    def puppet_role(self, value):
+        if value is None:
+            LOG.warn(_("A puppet role may not be None."))
+            return
+
+        val = str(value).strip()
+        if val == '':
+            LOG.warn(_("A puppet role may not be empty."))
+            return
+        self._puppet_role = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_tier(self):
+        """The name of the puppet tier of the VM."""
+        return self._puppet_tier
+
+    @puppet_tier.setter
+    def puppet_tier(self, value):
+        if value is None:
+            LOG.warn(_("A puppet tier name may not be None."))
+            return
+
+        val = str(value).strip().lower()
+        if val == '':
+            LOG.warn(_("A puppet tier name may not be empty."))
+            return
+
+        if val not in PUPPET_TIERS:
+            LOG.warn(_("A puppet tier should be one of {li} (given: {v!r}).").format(
+                li=pp(PUPPET_TIERS), v=value))
+
+        self._puppet_tier = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_env(self):
+        """The name of the puppet environment of the VM."""
+        if self._puppet_env is not None:
+            return self._puppet_env
+        if self.is_template:
+            return None
+        return self.puppet_tier
+
+    @puppet_env.setter
+    def puppet_env(self, value):
+        if value is None:
+            return
+
+        val = str(value).strip().lower()
+        if val == '':
+            self._puppet_env = None
+            return
+
+        if val not in PUPPET_ENVIRONMENTS:
+            LOG.warn(_("A puppet environment must be one of {li} (given: {v!r}).").format(
+                li=pp(PUPPET_ENVIRONMENTS), v=value))
+            return
+
+        self._puppet_env = val
+
+    # -----------------------------------------------------------
+    @property
+    def puppet_environment(self):
+        """The name of the puppet environment of the VM."""
+        return self.puppet_env
+
+    @puppet_environment.setter
+    def puppet_environment(self, value):
+        self.puppet_env = value
+
+    # -----------------------------------------------------------
+    @property
+    def rootdisk_size(self):
+        """Size of the root disk of the VM in GiB."""
+        return self._rootdisk_size
+
+    @rootdisk_size.setter
+    def rootdisk_size(self, value):
+        val = float(value)
+        msg = _(
+            "Invalid root disk size {n} - size must be "
+            "{min} <= SIZE <= {max}.").format(
+            n=val, min=self.min_rootdisk_size, max=self.max_rootdisk_size)
+        if val < self.min_rootdisk_size or val > self.max_rootdisk_size:
+            raise ValueError(msg)
+        self._rootdisk_size = val
+
+    # -----------------------------------------------------------
+    @property
+    def already_existing(self):
+        """The Virtual machine is already existing in VSphere."""
+        return self._already_existing
+
+    @already_existing.setter
+    def already_existing(self, value):
+        self._already_existing = to_bool(value)
+
+    # -------------------------------------------------------------------------
+    def as_dict(self, short=True):
+        """
+        Transforms the elements of the object into a dict
+
+        @param short: don't include local properties in resulting dict.
+        @type short: bool
+
+        @return: structure as dict
+        @rtype:  dict
+        """
+
+        res = super(TerraformVm, self).as_dict(short=short)
+        res['already_existing'] = self.already_existing
+        res['boot_delay'] = self.boot_delay
+        res['cluster'] = self.cluster
+        res['customer'] = self.customer
+        res['datastore'] = self.datastore
+        res['domain'] = self.domain
+        res['ds_cluster'] = self.ds_cluster
+        res['ds_type'] = self.ds_type
+        res['folder'] = self.folder
+        res['fqdn'] = self.fqdn
+        res['has_backup'] = self.has_backup
+        res['has_puppet'] = self.has_puppet
+        res['hiera_customer'] = self.hiera_customer
+        res['hiera_project'] = self.hiera_project
+        res['hostname'] = self.hostname
+        res['interfaces'] = []
+        res['is_template'] = self.is_template
+        res['memory'] = self.memory
+        res['name'] = self.name
+        res['num_cpus'] = self.num_cpus
+        res['owner'] = self.owner
+        res['puppet_contact'] = self.puppet_contact
+        res['puppet_customer'] = self.puppet_customer
+        res['puppet_project'] = self.puppet_project
+        res['puppet_env'] = self.puppet_env
+        res['puppet_environment'] = self.puppet_environment
+        res['puppet_role'] = self.puppet_role
+        res['puppet_tier'] = self.puppet_tier
+        res['purpose'] = self.purpose
+        res['rootdisk_size'] = self.rootdisk_size
+        res['tf_name'] = self.tf_name
+        res['vm_template'] = self.vm_template
+
+        for interface in self.interfaces:
+            res['interfaces'].append(interface.as_dict(short=short))
+
+        return res
+
+    # -------------------------------------------------------------------------
+    def apply_root_disk(self):
+
+        if self.verbose > 2:
+            LOG.debug(_("Resetting root disk."))
+
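+        # The root disk always occupies unit number 0 and is rebuilt
+        # from the current rootdisk_size.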
+        disk = TerraformDisk(
+            appname=self.appname, verbose=self.verbose, base_dir=self.base_dir,
+            root_disk=True, unit_number=0, size_gb=self.rootdisk_size,
+            initialized=True)
+
+        self.disks[0] = disk
+
+    # -------------------------------------------------------------------------
+    def _add_data_disk(self, disk_def, unit_number=1):
+
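+        # Translate the disk definition keys (size, vg_name, lv_name,
+        # mount_point, fs_type) into TerraformDisk parameters.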
+        params = {'unit_number': unit_number}
+        for key in disk_def.keys():
+            val = disk_def[key]
+            if self.re_disk_size.search(key) and val:
+                params['size_gb'] = val
+            elif self.re_disk_vgname.search(key):
+                params['vg_name'] = val
+            elif self.re_disk_lvname.search(key):
+                params['lv_name'] = val
+            elif self.re_disk_mountpoint.search(key):
+                params['mountpoint'] = val
+            elif self.re_disk_fstype.search(key) and val:
+                params['fs_type'] = val
+
+        if self.verbose > 2:
+            LOG.debug(_("Using parameters for init data disk:") + "\n" + pp(params))
+
+        params['appname'] = self.appname
+        params['verbose'] = self.verbose
+        params['base_dir'] = self.base_dir
+
+        disk = TerraformDisk(**params)
+        if self.verbose > 2:
+            LOG.debug(_("Got data disk:") + "\n" + pp(disk.as_dict()))
+        self.disks[unit_number] = disk
+
+
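+# Illustrative usage only - the names and values below are hypothetical, and
+# the real call sites presumably live in the handler module:
+#
+#   template = TerraformVm.from_def(
+#       tpl_def, name='defaults', is_template=True, default_cluster='cl-dev-01')
+#   vm = TerraformVm.from_def(
+#       vm_def, name='web01.example.com', template_vm=template, verbose=1)
+#   print(pp(vm.as_dict()))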
+# =============================================================================
+if __name__ == "__main__":
+
+    pass
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 list
diff --git a/lib/cr_tf/xlate.py b/lib/cr_tf/xlate.py
new file mode 100644 (file)
index 0000000..5e02df2
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@author: Frank Brehm
+@contact: frank.brehm@pixelpark.com
+@copyright: © 2019 by Frank Brehm, Berlin
+@summary: The module for i18n.
+          It provides a translation object, usable from all other
+          modules in this package.
+"""
+from __future__ import absolute_import, print_function
+
+# Standard modules
+import logging
+import gettext
+
+from pathlib import Path
+
+# Third party modules
+from babel.support import Translations
+
+DOMAIN = 'pp_provisioning'
+
+LOG = logging.getLogger(__name__)
+
+__version__ = '1.0.2'
+
+__me__ = Path(__file__).resolve()
+__module_dir__ = __me__.parent
+__lib_dir__ = __module_dir__.parent
+__base_dir__ = __lib_dir__.parent
+LOCALE_DIR = __base_dir__.joinpath('locale')
+if not LOCALE_DIR.is_dir():
+    LOCALE_DIR = __module_dir__.joinpath('locale')
+    if not LOCALE_DIR.is_dir():
+        LOCALE_DIR = None
+
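+# Load the compiled message catalogue for DOMAIN via Babel; fall back to
+# NullTranslations if no .mo file is found.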
+__mo_file__ = gettext.find(DOMAIN, str(LOCALE_DIR))
+if __mo_file__:
+    try:
+        with open(__mo_file__, 'rb') as F:
+            XLATOR = Translations(F, DOMAIN)
+    except FileNotFoundError:
+        XLATOR = gettext.NullTranslations()
+else:
+    XLATOR = gettext.NullTranslations()
+
+_ = XLATOR.gettext
+
+# =============================================================================
+
+if __name__ == "__main__":
+
+    print(_("Module directory: {!r}").format(__module_dir__))
+    print(_("Base directory: {!r}").format(__base_dir__))
+    print(_("Locale directory: {!r}").format(LOCALE_DIR))
+    print(_("Locale domain: {!r}").format(DOMAIN))
+    print(_("Found .mo-file: {!r}").format(__mo_file__))
+
+# =============================================================================
+
+# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4