Diffstat (limited to 'utils')
-rw-r--r--  utils/src/ooinstall/cli_installer.py     174
-rw-r--r--  utils/src/ooinstall/oo_config.py           4
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py  18
-rw-r--r--  utils/test/cli_installer_tests.py         433
-rw-r--r--  utils/test/fixture.py                     221
5 files changed, 553 insertions, 297 deletions
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 0b38f706c..ee962c21a 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -79,21 +79,32 @@ def collect_hosts(version=None, masters_set=False, print_summary=True):
Returns: a list of host information collected from the user
"""
- min_masters_for_ha = 3
click.clear()
- click.echo('***Host Configuration***')
+ click.echo('*** Host Configuration ***')
message = """
-The OpenShift Master serves the API and web console. It also coordinates the
-jobs that have to run across the environment. It can even run the datastore.
-For wizard based installations the database will be embedded. It's possible to
-change this later using etcd from Red Hat Enterprise Linux 7.
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP or hostname to connect to for each system in the cluster.
+You will then be prompted to identify what role you would like this system to
+serve in the cluster.
+
+OpenShift Masters serve the API and web console and coordinate the jobs to run
+across the environment. If desired you can specify multiple Master systems for
+an HA deployment, in which case you will be prompted to identify a *separate*
+system to act as the load balancer for your cluster after all Masters and Nodes
+are defined.
+
+If only one Master is specified, an etcd instance embedded within the OpenShift
+Master service will be used as the datastore. This can later be replaced with a
+separate etcd instance if desired. If multiple Masters are specified, a
+separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
-The OpenShift Node provides the runtime environments for containers. It will
+OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
@@ -106,8 +117,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
num_masters = 0
while more_hosts:
host_props = {}
- host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
- default='',
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
@@ -115,7 +125,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
host_props['master'] = True
num_masters += 1
- if num_masters >= min_masters_for_ha or version == '3.0':
+ if version == '3.0':
masters_set = True
host_props['node'] = True
@@ -134,31 +144,77 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
if print_summary:
- click.echo('')
- click.echo('Current Masters: {}'.format(num_masters))
- click.echo('Current Nodes: {}'.format(len(hosts)))
- click.echo('Additional Masters required for HA: {}'.format(max(min_masters_for_ha - num_masters, 0)))
- click.echo('')
+ print_host_summary(hosts)
- if num_masters <= 1 or num_masters >= min_masters_for_ha:
+ # One master is enough for an all-in-one deployment, so ask whether the
+ # user wants to proceed. With exactly two masters the configuration is
+ # invalid, so skip the question and require another host:
+ if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
- if num_masters > 1:
- hosts.append(collect_master_lb())
+ if num_masters >= 3:
+ collect_master_lb(hosts)
return hosts
-def collect_master_lb():
+
+def print_host_summary(hosts):
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ click.echo('')
+ click.echo('OpenShift Masters: %s' % len(masters))
+ for host in masters:
+ click.echo(' %s' % host.connect_to)
+ click.echo('OpenShift Nodes: %s' % len(nodes))
+ for host in nodes:
+ click.echo(' %s' % host.connect_to)
+
+ click.echo("")
+
+ if len(masters) == 1:
+ click.echo("NOTE: Add a total of 3 or more Masters to perform an HA"
+ " installation.")
+ elif len(masters) == 2:
+ min_masters_message = """
+WARNING: A minimum of 3 masters is required to perform an HA installation.
+Please add one more to proceed.
+"""
+ click.echo(min_masters_message)
+ elif len(masters) >= 3:
+ ha_message = """
+NOTE: Multiple Masters specified; this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer once
+finished entering hosts.
+"""
+ click.echo(ha_message)
+
+ dedicated_nodes_message = """
+WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
+Nodes are specified, each configured Master will be marked as a schedulable
+Node.
+"""
+ dedicated_nodes = [host for host in hosts if host.node and not host.master]
+ if len(dedicated_nodes) == 0:
+ click.echo(dedicated_nodes_message)
+
+ click.echo('')
+
+
+def collect_master_lb(hosts):
"""
- Get an HA proxy from the user
+ Get a valid load balancer from the user and append it to the list of
+ hosts.
+
+ Ensure the user does not specify a system already in use as a master or
+ node, as that is an invalid configuration.
"""
message = """
Setting up High Availability Masters requires a load balancing solution.
-Please provide a host that will be configured as a proxy. This can either be
-an existing load balancer configured to balance all masters on port 8443 or a
-new host that will have HAProxy installed on it.
+Please provide the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
-If the host provided does is not yet configured a reference haproxy load
+If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
@@ -166,17 +222,28 @@ hostname.
"""
click.echo(message)
host_props = {}
- host_props['connect_to'] = click.prompt('Enter hostname or IP address:',
- default='',
- value_proc=validate_prompt_hostname)
+
+ # Using an embedded function here so we have access to the hosts list:
+ def validate_prompt_lb(hostname):
+ # Run the standard hostname check first:
+ hostname = validate_prompt_hostname(hostname)
+
+ # Make sure this host wasn't already specified:
+ for host in hosts:
+ if host.connect_to == hostname and (host.master or host.node):
+ raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+ 'please specify a separate host' % hostname)
+ return hostname
+
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_lb)
install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
host_props['master'] = False
host_props['node'] = False
host_props['master_lb'] = True
master_lb = Host(**host_props)
-
- return master_lb
+ hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
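
The embedded validate_prompt_lb above relies on click's value_proc contract: when the callable raises click.BadParameter, click.prompt echoes the error and asks again. A minimal standalone sketch of the same closure pattern (the helper name and the `taken` list are illustrative, not part of this patch):

    import click

    def prompt_for_new_host(taken):
        """Prompt until the user enters a hostname not already in `taken`."""
        def validate(hostname):
            # click calls this with the raw input; raising BadParameter
            # prints the error and re-prompts automatically.
            if hostname in taken:
                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                         'please specify a separate host' % hostname)
            return hostname
        # The closure captures `taken`, so no extra state needs to be
        # threaded through click's prompt machinery.
        return click.prompt('Enter hostname or IP address', value_proc=validate)

    if __name__ == '__main__':
        print(prompt_for_new_host(['10.0.0.1', '10.0.0.2']))

Defining the validator inside the enclosing function lets it consult the current host list without changing validate_prompt_hostname's signature.
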
@@ -249,35 +316,44 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
-def check_hosts_config(oo_cfg):
+def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.hosts if host.master]
+
+ if len(masters) == 2:
+ click.echo("A minimum of 3 Masters are required for HA deployments.")
+ sys.exit(1)
+
if len(masters) > 1:
master_lb = [host for host in oo_cfg.hosts if host.master_lb]
if len(master_lb) > 1:
- click.echo('More than one Master load balancer specified. Only one is allowed.')
- sys.exit(0)
+ click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
+ sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].master or master_lb[0].node:
- click.echo('The Master load balancer is configured as a master or node. Please correct this.')
- sys.exit(0)
+ click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
+ sys.exit(1)
else:
message = """
-No HAProxy given in config. Either specify one or provide a load balancing solution
-of your choice to balance the master API (port 8443) on all master hosts.
+ERROR: No master load balancer specified in config. You must provide the FQDN
+of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
- confirm_continue(message)
+ click.echo(message)
+ sys.exit(1)
- nodes = [host for host in oo_cfg.hosts if host.node]
- if len(masters) == len(nodes):
+ dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+ if len(dedicated_nodes) == 0:
message = """
-No dedicated Nodes specified. By default, colocated Masters have their Nodes
-set to unschedulable. Continuing at this point will label all nodes as
-schedulable.
+WARNING: No dedicated Nodes specified. By default, colocated Masters have
+their Nodes set to unschedulable. If you proceed, all nodes will be labelled
+as schedulable.
"""
- confirm_continue(message)
+ if unattended:
+ click.echo(message)
+ else:
+ confirm_continue(message)
return
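
The rules check_hosts_config now enforces read as a small decision table: exactly two masters is always fatal, and three or more masters require exactly one dedicated load balancer. A rough standalone sketch of that table (the function name and plain-dict hosts are illustrative, not part of this patch):

    def validate_ha_config(hosts):
        """Return the fatal errors for a proposed host list."""
        errors = []
        masters = [h for h in hosts if h.get('master')]
        lbs = [h for h in hosts if h.get('master_lb')]
        if len(masters) == 2:
            errors.append('A minimum of 3 Masters is required for HA deployments.')
        elif len(masters) > 2:
            if len(lbs) > 1:
                errors.append('More than one Master load balancer specified.')
            elif len(lbs) == 1 and (lbs[0].get('master') or lbs[0].get('node')):
                errors.append('The Master load balancer is configured as a master or node.')
            elif not lbs:
                errors.append('No master load balancer specified in config.')
        return errors

    # Three masters but no load balancer is fatal:
    assert validate_ha_config([{'master': True}] * 3) == \
        ['No master load balancer specified in config.']
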
@@ -301,7 +377,8 @@ def get_variant_and_version(multi_master=False):
return product, version
def confirm_continue(message):
- click.echo(message)
+ if message:
+ click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
@@ -391,7 +468,7 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
def collect_new_nodes():
click.clear()
- click.echo('***New Node Configuration***')
+ click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
@@ -639,9 +716,10 @@ def install(ctx, force):
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
- check_hosts_config(oo_cfg)
+ check_hosts_config(oo_cfg, ctx.obj['unattended'])
click.echo('Gathering information from hosts...')
+ print_host_summary(oo_cfg.hosts)
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
@@ -666,8 +744,8 @@ def install(ctx, force):
click.echo('Ready to run installation process.')
message = """
-If changes are needed to the values recorded by the installer please update {}.
-""".format(oo_cfg.config_path)
+If changes are needed, please edit the config file above and re-run.
+"""
if not ctx.obj['unattended']:
confirm_continue(message)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index b6f0cdce3..37aaf9197 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -50,8 +50,8 @@ class Host(object):
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
- raise OOConfigInvalidHostError("You must specify either and 'ip' " \
- "or 'hostname' to connect to.")
+ raise OOConfigInvalidHostError("You must specify either an ip " \
+ "or hostname as 'connect_to'")
if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
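
For reference, the tightened message surfaces whenever a host entry lacks connect_to. A short sketch, assuming Host and OOConfigInvalidHostError import as shown and that connect_to is taken straight from the keyword arguments:

    from ooinstall.oo_config import Host, OOConfigInvalidHostError

    try:
        Host(node=True)  # no connect_to supplied
    except OOConfigInvalidHostError as err:
        print(err)  # -> You must specify either an ip or hostname as 'connect_to'
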
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 4aa60922d..c5257f1db 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -62,12 +62,12 @@ def generate_inventory(hosts):
# and store it on the Node object.
if set(nodes) == set(masters):
for node in nodes:
- write_host(node, base_inventory)
+ write_host(node, base_inventory, True)
else:
for node in nodes:
# TODO: Until the Master can run the SDN itself we have to configure the Masters
# as Nodes too.
- schedulable = True
+ schedulable = None
if node in masters:
schedulable = False
write_host(node, base_inventory, schedulable)
@@ -106,13 +106,13 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
if CFG.settings['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=true\n')
- if multiple_masters:
+ if multiple_masters and proxy is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
-def write_host(host, inventory, schedulable=True):
+def write_host(host, inventory, schedulable=None):
global CFG
facts = ''
@@ -126,8 +126,16 @@ def write_host(host, inventory, schedulable=True):
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
# TODO: For now write_host handles both masters and nodes.
# Technically only nodes will ever need this.
- if not schedulable:
+
+ # Distinguish between three states: no schedulability specified (use default),
+ # explicitly set to True, or explicitly set to False:
+ if schedulable is None:
+ pass
+ elif schedulable:
+ facts += ' openshift_schedulable=True'
+ else:
facts += ' openshift_schedulable=False'
+
installer_host = socket.gethostname()
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
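
The tri-state schedulable flag is the subtle part of this hunk: None means write no fact and let the playbooks apply their default, while True and False are emitted explicitly. A sketch of just that mapping (the helper name is illustrative, not part of this patch):

    def schedulable_fact(schedulable):
        """Map the tri-state flag to the fact appended to a node's line.

        None  -> nothing written; the playbooks apply their default
        True  -> ' openshift_schedulable=True'
        False -> ' openshift_schedulable=False'
        """
        if schedulable is None:
            return ''
        return ' openshift_schedulable=%s' % schedulable

    # Colocated master/nodes are pinned unschedulable, while dedicated
    # nodes (schedulable=None) are left to the default:
    assert schedulable_fact(False) == ' openshift_schedulable=False'
    assert schedulable_fact(True) == ' openshift_schedulable=True'
    assert schedulable_fact(None) == ''
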
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 90b6b15a3..d028bf472 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -5,12 +5,10 @@
import copy
import os
import ConfigParser
-import yaml
import ooinstall.cli_installer as cli
-from click.testing import CliRunner
-from test.oo_config_tests import OOInstallFixture
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
from mock import patch
@@ -76,8 +74,32 @@ MOCK_FACTS_QUICKHA = {
},
}
-# Substitute in a product name before use:
-SAMPLE_CONFIG = """
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -93,6 +115,7 @@ hosts:
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
+ master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
@@ -100,9 +123,22 @@ hosts:
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ master: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
"""
-BAD_CONFIG = """
+QUICKHA_2_MASTER_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -113,20 +149,56 @@ hosts:
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
+"""
+
+QUICKHA_CONFIG_REUSED_LB = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ master_lb: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ master: true
"""
-QUICKHA_CONFIG = """
+QUICKHA_CONFIG_NO_LB = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -150,116 +222,9 @@ hosts:
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
- - connect_to: 10.0.0.4
- ip: 10.0.0.4
- hostname: proxy-private.example.com
- public_ip: 24.222.0.4
- public_hostname: proxy.example.com
- master_lb: true
+ master: true
"""
-class OOCliFixture(OOInstallFixture):
-
- def setUp(self):
- OOInstallFixture.setUp(self)
- self.runner = CliRunner()
-
- # Add any arguments you would like to test here, the defaults ensure
- # we only do unattended invocations here, and using temporary files/dirs.
- self.cli_args = ["-a", self.work_dir]
-
- def run_cli(self):
- return self.runner.invoke(cli.cli, self.cli_args)
-
- def assert_result(self, result, exit_code):
- if result.exception is not None or result.exit_code != exit_code:
- print "Unexpected result from CLI execution"
- print "Exit code: %s" % result.exit_code
- print "Exception: %s" % result.exception
- print result.exc_info
- import traceback
- traceback.print_exception(*result.exc_info)
- print "Output:\n%s" % result.output
- self.fail("Exception during CLI execution")
-
- def _read_yaml(self, config_file_path):
- f = open(config_file_path, 'r')
- config = yaml.safe_load(f.read())
- f.close()
- return config
-
- def _verify_load_facts(self, load_facts_mock):
- """ Check that we ran load facts with expected inputs. """
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
- def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
- """ Check that we ran playbook with expected inputs. """
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
- def _verify_config_hosts(self, written_config, host_count):
- print written_config['hosts']
- self.assertEquals(host_count, len(written_config['hosts']))
- for h in written_config['hosts']:
- self.assertTrue('hostname' in h)
- self.assertTrue('public_hostname' in h)
- if 'preconfigured' not in h:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
- self.assertTrue('public_ip' in h)
-
- #pylint: disable=too-many-arguments
- def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
- run_playbook_mock, cli_input,
- exp_hosts_len=None, exp_hosts_to_run_on_len=None,
- force=None):
- """
- Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
- few subtle branches in the logic. The goal with this method is simply
- to handle all the messy stuff here and allow the main test cases to be
- easily read. The basic idea is to modify mock_facts to return a
- version indicating OpenShift is already installed on particular hosts.
- """
- load_facts_mock.return_value = (mock_facts, 0)
- run_playbook_mock.return_value = 0
-
- if cli_input:
- self.cli_args.append("install")
- result = self.runner.invoke(cli.cli,
- self.cli_args,
- input=cli_input)
- else:
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
-
- self.cli_args.extend(["-c", config_file, "install"])
- if force:
- self.cli_args.append("--force")
- result = self.runner.invoke(cli.cli, self.cli_args)
- written_config = self._read_yaml(config_file)
- self._verify_config_hosts(written_config, exp_hosts_len)
-
- self.assert_result(result, 0)
- self._verify_load_facts(load_facts_mock)
- self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
-
- # Make sure we ran on the expected masters and nodes:
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
class UnattendedCliTests(OOCliFixture):
def setUp(self):
@@ -438,7 +403,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# We didn't specify a version so the latest should have been assumed,
@@ -467,7 +432,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# Make sure our older version was preserved:
@@ -569,10 +534,11 @@ class UnattendedCliTests(OOCliFixture):
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
- assert result.exit_code == 1
- assert result.output == "You must specify either and 'ip' or 'hostname' to connect to.\n"
+ self.assertEquals(1, result.exit_code)
+ self.assertTrue("You must specify either an ip or hostname"
+ in result.output)
- #unattended with two masters, one node, and haproxy
+ #unattended with three masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
@@ -586,25 +552,62 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
- # If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
- env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
-
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(4, len(hosts))
- self.assertEquals(4, len(hosts_to_run_on))
+ self.assertEquals(5, len(hosts))
+ self.assertEquals(5, len(hosts_to_run_on))
+
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is an invalid config:
+ self.assert_result(result, 1)
+ self.assertTrue("A minimum of 3 Masters are required" in result.output)
+
+ #unattended with three masters, one node, but no load balancer specified:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid input:
+ self.assert_result(result, 1)
+ self.assertTrue('No master load balancer specified in config' in result.output)
+
+ #unattended with three masters, one node, and one of the masters reused as load balancer:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid configuration:
+ self.assert_result(result, 1)
+
class AttendedCliTests(OOCliFixture):
@@ -614,84 +617,13 @@ class AttendedCliTests(OOCliFixture):
self.config_file = os.path.join(self.work_dir, 'config.yml')
self.cli_args.extend(["-c", self.config_file])
- #pylint: disable=too-many-arguments,too-many-branches
- def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
- add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
- master_lb=None):
- """
- Builds a CLI input string with newline characters to simulate
- the full run.
- This gives us only one place to update when the input prompts change.
- """
-
- inputs = [
- 'y', # let's proceed
- ]
- if ssh_user:
- inputs.append(ssh_user)
-
- if variant_num:
- inputs.append(str(variant_num)) # Choose variant + version
-
- num_masters = 0
- if hosts:
- i = 0
- min_masters_for_ha = 3
- for (host, is_master) in hosts:
- inputs.append(host)
- if is_master:
- inputs.append('y')
- num_masters += 1
- else:
- inputs.append('n')
- #inputs.append('rpm')
- if i < len(hosts) - 1:
- if num_masters <= 1 or num_masters >= min_masters_for_ha:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- if master_lb:
- inputs.append(master_lb[0])
- inputs.append('y' if master_lb[1] else 'n')
-
- # TODO: support option 2, fresh install
- if add_nodes:
- if schedulable_masters_ok:
- inputs.append('y')
- inputs.append('1') # Add more nodes
- i = 0
- for (host, is_master) in add_nodes:
- inputs.append(host)
- #inputs.append('rpm')
- if i < len(add_nodes) - 1:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- if add_nodes is None:
- total_hosts = hosts
- else:
- total_hosts = hosts + add_nodes
- if total_hosts is not None and num_masters == len(total_hosts):
- inputs.append('y')
-
- inputs.extend([
- confirm_facts,
- 'y', # lets do this
- ])
-
- return '\n'.join(inputs)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
('10.0.0.3', False)],
@@ -706,7 +638,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
@@ -732,7 +664,7 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
],
@@ -744,13 +676,12 @@ class AttendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
- print result
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 2)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -762,7 +693,7 @@ class AttendedCliTests(OOCliFixture):
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'),
SAMPLE_CONFIG % 'openshift-enterprise')
- cli_input = self._build_input(confirm_facts='y')
+ cli_input = build_input(confirm_facts='y')
self.cli_args.extend(["-c", config_file])
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
@@ -773,7 +704,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
#interactive with config file and all installed hosts
@@ -784,7 +715,7 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
],
add_nodes=[('10.0.0.2', False)],
@@ -803,15 +734,15 @@ class AttendedCliTests(OOCliFixture):
#interactive multimaster: one more node than master
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_quick_ha1(self, load_facts_mock, run_playbook_mock):
+ def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
- ('10.0.0.3', False),
- ('10.0.0.4', True)],
+ ('10.0.0.3', True),
+ ('10.0.0.4', False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -824,7 +755,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 5, 5)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 5)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
@@ -833,21 +764,22 @@ class AttendedCliTests(OOCliFixture):
inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
self.assertEquals('False',
inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.3'))
self.assertEquals('False',
- inventory.get('nodes', '10.0.0.4 openshift_schedulable'))
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.4'))
- return
+ self.assertTrue(inventory.has_section('etcd'))
+ self.assertEquals(3, len(inventory.items('etcd')))
- #interactive multimaster: equal number masters and nodes
+ #interactive multimaster: identical masters and nodes
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_quick_ha2(self, load_facts_mock, run_playbook_mock):
+ def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', True),
('10.0.0.3', True)],
@@ -863,19 +795,38 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 4, 4)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.1'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.2'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.3'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+
+ #interactive multimaster: attempting to use a master as the load balancer should fail:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
- return
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', False),
+ ('10.0.0.4', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
#interactive all-in-one
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -884,7 +835,7 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True)],
ssh_user='root',
variant_num=1,
@@ -897,15 +848,13 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 1, 1)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 1)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
- self.assertEquals(None,
- inventory.get('nodes', '10.0.0.1'))
-
- return
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
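
The inventory assertions in these tests exploit a ConfigParser quirk: an Ansible INI line such as `10.0.0.1 openshift_schedulable=False` parses under allow_no_value=True as option `10.0.0.1 openshift_schedulable` with value `False`, while a bare hostname parses with value None. A sketch of that round trip (the sample inventory text is illustrative):

    import ConfigParser
    import StringIO

    INVENTORY = """
    [nodes]
    10.0.0.1 openshift_schedulable=False
    10.0.0.4
    """

    parser = ConfigParser.ConfigParser(allow_no_value=True)
    parser.readfp(StringIO.StringIO(INVENTORY))
    assert parser.get('nodes', '10.0.0.1 openshift_schedulable') == 'False'
    assert parser.get('nodes', '10.0.0.4') is None
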
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..14baec6b3
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,221 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+def read_yaml(config_file_path):
+ cfg_f = open(config_file_path, 'r')
+ config = yaml.safe_load(cfg_f.read())
+ cfg_f.close()
+ return config
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+ # Add any arguments you would like to test here; the defaults ensure
+ # we only do unattended invocations using temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for host in written_config['hosts']:
+ self.assertTrue('hostname' in host)
+ self.assertTrue('public_hostname' in host)
+ if 'preconfigured' not in host:
+ self.assertTrue(host['node'])
+ self.assertTrue('ip' in host)
+ self.assertTrue('public_ip' in host)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(
+ os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+
+#pylint: disable=too-many-arguments,too-many-branches
+def build_input(ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
+ master_lb=None):
+ """
+ Build an input string simulating a user entering values in an interactive
+ attended install.
+
+ This is intended to give us one place to update when the CLI prompts change.
+ We should aim to keep this dependent on optional keyword arguments with
+ sensible defaults to keep things from getting too fragile.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
+ if hosts:
+ i = 0
+ for (host, is_master) in hosts:
+ inputs.append(host)
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
+ #inputs.append('rpm')
+ # With exactly 2 masters the CLI considers the configuration invalid
+ # and skips the "add more hosts" prompt, so the user is taken straight
+ # to entering the next host:
+ if num_masters != 2:
+ if i < len(hosts) - 1:
+ if num_masters >= 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ # master_lb may be a single hostname, or a list of hostnames if you
+ # intend for earlier entries to be rejected:
+ if master_lb:
+ if isinstance(master_lb[0], (list, tuple)):
+ inputs.extend(master_lb[0])
+ else:
+ inputs.append(master_lb[0])
+ inputs.append('y' if master_lb[1] else 'n')
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ if schedulable_masters_ok:
+ inputs.append('y')
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master) in add_nodes:
+ inputs.append(host)
+ #inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # lets do this
+ ])
+
+ return '\n'.join(inputs)
+
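
As a usage sketch, the attended HA input for three masters plus a dedicated node (as in test_ha_dedicated_node above) would be built roughly like this; the load balancer tuple is illustrative since that argument is not shown in the hunk:

    from test.fixture import build_input

    cli_input = build_input(hosts=[('10.0.0.1', True),   # master
                                   ('10.0.0.2', True),   # master
                                   ('10.0.0.3', True),   # master
                                   ('10.0.0.4', False)], # dedicated node
                            ssh_user='root',
                            variant_num=1,
                            confirm_facts='y',
                            master_lb=('10.0.0.5', False))

    # Each element becomes one line fed to click via CliRunner's `input`:
    print(cli_input)
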