-rw-r--r--  bin/README_SHELL_COMPLETION                                         |   2
-rw-r--r--  bin/openshift_ansible.conf.example                                  |   2
-rw-r--r--  bin/openshift_ansible/awsutil.py                                    |  11
-rwxr-xr-x  bin/ossh_bash_completion                                            |  20
-rw-r--r--  bin/ossh_zsh_completion                                             |  10
-rw-r--r--  bin/zsh_functions/_ossh                                             |   4
-rw-r--r--  inventory/multi_ec2.yaml.example                                    |  32
-rwxr-xr-x  inventory/multi_inventory.py (renamed from inventory/multi_ec2.py)  | 144
-rw-r--r--  inventory/multi_inventory.yaml.example                              |  51
-rw-r--r--  openshift-ansible.spec                                              |  10
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml                    |  10
-rw-r--r--  test/units/README.md                                                |   2
-rwxr-xr-x  test/units/multi_inventory_test.py                                  | 114
-rwxr-xr-x  test/units/mutli_ec2_test.py                                        |  95
14 files changed, 294 insertions(+), 213 deletions(-)
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
index 5f05df7fc..49bba3acc 100644
--- a/bin/README_SHELL_COMPLETION
+++ b/bin/README_SHELL_COMPLETION
@@ -14,7 +14,7 @@ will populate the cache file and the completions should
become available.
This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
It will then parse a few {host}.{env} out of the json
and return them to be completable.
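
For reference (not part of the patch), the completion scripts changed below read that cache directly. A minimal sketch of the parsing they perform, assuming the ec2_tag_Name and ec2_tag_environment hostvars used in the one-liners in the hunks that follow:

    import json
    import os

    # Read the cached inventory and emit "host.env" completion candidates,
    # mirroring the inline python in ossh_bash_completion / ossh_zsh_completion.
    cache = os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")
    with open(cache) as cache_file:
        inventory = json.load(cache_file)

    hostvars = inventory["_meta"]["hostvars"]
    completions = ["%s.%s" % (host["ec2_tag_Name"], host["ec2_tag_environment"])
                   for host in hostvars.values()
                   if "ec2_tag_Name" in host and "ec2_tag_environment" in host]
    print("\n".join(completions))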
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
index e891b855a..8786dfc13 100644
--- a/bin/openshift_ansible.conf.example
+++ b/bin/openshift_ansible.conf.example
@@ -1,5 +1,5 @@
#[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
#[host_type_aliases]
#host-type-one = aliasa,aliasb
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 9df034f57..45345007c 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -4,7 +4,10 @@
import os
import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
class ArgumentError(Exception):
"""This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
Keyword arguments:
args -- optional arguments to pass to the inventory script
"""
- mec2 = multi_ec2.MultiEc2(args)
- mec2.run()
- return mec2.result
+ minv = multi_inventory.MultiInventory(args)
+ minv.run()
+ return minv.result
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 5072161f0..997ff0f9c 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 44500c618..3c4018636 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index 7c6cb7b0b..d205e1055 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -1,8 +1,8 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index bbd81ad20..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/hosts/ec2.py
- provider_config:
- ec2:
- regions: all
- regions_exclude: us-gov-west-1,cn-north-1
- destination_variable: public_dns_name
- route53: False
- cache_path: ~/.ansible/tmp
- cache_max_age: 300
- vpc_destination_variable: ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- all_group: ec2
- extra_vars:
- cloud: aws
- account: aws1
-
-- name: aws2
- provider: aws/hosts/ec2.py
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py
index 98dde3f3c..354a8c10c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_inventory.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
'''
- Fetch and combine multiple ec2 account settings into a single
+ Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
import fcntl
import tempfile
import copy
+from string import Template
+import shutil
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+ '''Exceptions for MultiInventory class'''
+ pass
+
+class MultiInventory(object):
'''
- MultiEc2 class:
+ MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
@@ -35,7 +41,7 @@ class MultiEc2(object):
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
- self.all_ec2_results = {}
+ self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -56,7 +62,7 @@ class MultiEc2(object):
cache is valid for the inventory.
if the cache is valid; return cache
- else the credentials are loaded from multi_ec2.yaml or from the env
+ else the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the provider specified.
'''
# load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
with open(conf_file) as conf:
config = yaml.safe_load(conf)
+ # Provide a check for unique account names
+ if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+ raise MultiInventoryException('Duplicate account names in config file')
+
return config
def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
else:
cmds.append('--list')
- cmds.append('--refresh-cache')
+ if 'aws' in provider.lower():
+ cmds.append('--refresh-cache')
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
- def generate_config(config_data):
- """Generate the ec2.ini file in as a secure temp file.
- Once generated, pass it to the ec2.py as an environment variable.
+ def generate_config(provider_files):
+ """Generate the provider_files in a temporary directory.
"""
- fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
- for section, values in config_data.items():
- os.write(fildes, "[%s]\n" % section)
- for option, value in values.items():
- os.write(fildes, "%s = %s\n" % (option, value))
- os.close(fildes)
- return tmp_file_path
+ prefix = 'multi_inventory.'
+ tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+ for provider_file in provider_files:
+ filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+ content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+ filedes.write(content)
+ filedes.close()
+
+ return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
@@ -160,13 +172,21 @@ class MultiEc2(object):
'''
try:
all_results = []
- tmp_file_paths = []
+ tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
- env = account['env_vars']
- if account.has_key('provider_config'):
- tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
- env['EC2_INI_PATH'] = tmp_file_paths[-1]
+ tmp_dir = None
+ if account.has_key('provider_files'):
+ tmp_dir = MultiInventory.generate_config(account['provider_files'])
+ tmp_dir_paths.append(tmp_dir)
+
+ # Update env vars after creating provider_config_files
+ # so that we can grab the tmp_dir if it exists
+ env = account.get('env_vars', {})
+ if env and tmp_dir:
+ for key, value in env.items():
+ env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
})
finally:
- # Clean up the mkstemp file
- for tmp_file in tmp_file_paths:
- os.unlink(tmp_file)
+ # Clean up the mkdtemp dirs
+ for tmp_dir in tmp_dir_paths:
+ shutil.rmtree(tmp_dir)
return all_results
@@ -223,7 +243,7 @@ class MultiEc2(object):
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
- self.all_ec2_results[result['name']] = json.loads(result['out'])
+ self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
@@ -231,29 +251,52 @@ class MultiEc2(object):
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
- values = self.all_ec2_results.values()
+ values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
- MultiEc2.merge_destructively(self.result, result)
+ MultiInventory.merge_destructively(self.result, result)
+
+ def add_entry(self, data, keys, item):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ keys = a.b
+ item = c
+ '''
+ if "." in keys:
+ key, rest = keys.split(".", 1)
+ if key not in data:
+ data[key] = {}
+ self.add_entry(data[key], rest, item)
+ else:
+ data[keys] = item
+
+ def get_entry(self, data, keys):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ keys = a.b
+ return c
+ '''
+ if keys and "." in keys:
+ key, rest = keys.split(".", 1)
+ return self.get_entry(data[key], rest)
+ else:
+ return data.get(keys, None)
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
- results = self.all_ec2_results[acc_config['name']]
+ results = self.all_inventory_results[acc_config['name']]
+ results['all_hosts'] = results['_meta']['hostvars'].keys()
# Update each hostvar with the newly desired key: value from extra_*
- for _extra in ['extra_groups', 'extra_vars']:
+ for _extra in ['extra_vars', 'extra_groups']:
for new_var, value in acc_config.get(_extra, {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(new_var)] = str(value)
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, new_var, value)
# Add this group
- if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
- results["%s_%s" % (new_var, value)] = \
- copy.copy(results[acc_config['all_group']])
+ if _extra == 'extra_groups':
+ results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
# Clone groups goes here
for to_name, from_name in acc_config.get('clone_groups', {}).items():
@@ -262,14 +305,11 @@ class MultiEc2(object):
# Clone vars goes here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(to_name)] = data.get(str(from_name), 'nil')
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, to_name, self.get_entry(data, from_name))
- # store the results back into all_ec2_results
- self.all_ec2_results[acc_config['name']] = results
+ # store the results back into all_inventory_results
+ self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
@@ -277,7 +317,7 @@ class MultiEc2(object):
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiEc2.merge_destructively(input_a[key], input_b[key])
+ MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
@@ -333,7 +373,7 @@ class MultiEc2(object):
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
- json_data = MultiEc2.json_format_dict(self.result, True)
+ json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
@@ -369,7 +409,7 @@ class MultiEc2(object):
if __name__ == "__main__":
- MEC2 = MultiEc2()
- MEC2.parse_cli_args()
- MEC2.run()
- print MEC2.result_str()
+ MI2 = MultiInventory()
+ MI2.parse_cli_args()
+ MI2.run()
+ print MI2.result_str()
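
The add_entry/get_entry helpers added above drive the new extra_vars and clone_vars handling. A standalone sketch of the same dotted-key behaviour (illustrative only; the key names below are hypothetical):

    def add_entry(data, keys, item):
        """Store item in nested dicts using 'a.b.c' key notation."""
        if "." in keys:
            key, rest = keys.split(".", 1)
            data.setdefault(key, {})
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    def get_entry(data, keys):
        """Fetch a value from nested dicts using 'a.b.c' key notation."""
        if keys and "." in keys:
            key, rest = keys.split(".", 1)
            return get_entry(data[key], rest)
        return data.get(keys, None)

    hostvars = {}
    add_entry(hostvars, "cloud", "aws")        # {'cloud': 'aws'}
    add_entry(hostvars, "oo.account", "aws1")  # {'cloud': 'aws', 'oo': {'account': 'aws1'}}
    print(get_entry(hostvars, "oo.account"))   # aws1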
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+ - name: aws1
+ provider: aws/ec2.py
+ provider_files:
+ - name: ec2.ini
+ content: |-
+ [ec2]
+ regions = all
+ regions_exclude = us-gov-west-1,cn-north-1
+ destination_variable = public_dns_name
+ route53 = False
+ cache_path = ~/.ansible/tmp
+ cache_max_age = 300
+ vpc_destination_variable = ip_address
+ env_vars:
+ AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+ AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ extra_vars:
+ cloud: aws
+ account: aws1
+
+- name: mygce
+ extra_vars:
+ cloud: gce
+ account: gce1
+ env_vars:
+ GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ provider: gce/gce.py
+ provider_files:
+ - name: priv_key.pem
+ contents: |-
+ -----BEGIN PRIVATE KEY-----
+ yourprivatekeydatahere
+ -----END PRIVATE KEY-----
+ - name: gce.ini
+ contents: |-
+ [gce]
+ gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+ gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ gce_project_id = gce-project
+ zone = us-central1-a
+ network = default
+ gce_machine_type = n1-standard-2
+ gce_machine_image = rhel7
+
+cache_max_age: 600
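
As the inline comments in the example note, ${tmpdir} placeholders in provider_files contents and env_vars are filled in with the per-account temporary directory created by generate_config(). A minimal sketch of that substitution, with a hypothetical directory name:

    from string import Template

    tmp_dir = "/tmp/multi_inventory.XXXXXX"   # hypothetical mkdtemp() result
    env_vars = {"GCE_INI_PATH": "${tmpdir}/gce.ini"}

    # The same Template substitution run_provider() applies to env_vars
    # (and generate_config() applies to provider file contents).
    resolved = dict((key, Template(value).substitute(tmpdir=tmp_dir))
                    for key, value in env_vars.items())
    print(resolved["GCE_INI_PATH"])           # /tmp/multi_inventory.XXXXXX/gce.ini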
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index d034e6d84..8ea9120f2 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
# Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
# openshift-ansible-docs install
@@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible
mkdir -p %{buildroot}%{_datadir}/ansible/inventory
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
@@ -137,7 +137,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
%files inventory
%config(noreplace) /etc/ansible/*
%dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/multi_inventory.py*
%package inventory-aws
Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 9cc15c0a8..bce6a8745 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -6,7 +6,7 @@
- name:
copy:
content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_ec2.yaml
+ dest: /etc/ansible/multi_inventory.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
mode: "0640"
@@ -20,17 +20,17 @@
- file:
state: link
- src: /usr/share/ansible/inventory/multi_ec2.py
- dest: /etc/ansible/inventory/multi_ec2.py
+ src: /usr/share/ansible/inventory/multi_inventory.py
+ dest: /etc/ansible/inventory/multi_inventory.py
owner: root
group: libra_ops
# This cron uses the above location to call its job
- name: Cron to keep cache fresh
cron:
- name: 'multi_ec2_inventory'
+ name: 'multi_inventory'
minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+ job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
- name: Set cache location
diff --git a/test/units/README.md b/test/units/README.md
index 3bed227eb..78a02c3ea 100644
--- a/test/units/README.md
+++ b/test/units/README.md
@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
$ source test/env-setup
Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
new file mode 100755
index 000000000..168cd82b7
--- /dev/null
+++ b/test/units/multi_inventory_test.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+ '''
+ Test class for multiInventory
+ '''
+
+# def setUp(self):
+# '''setup method'''
+# pass
+
+ def test_merge_simple_1(self):
+ '''Testing a simple merge of 2 dictionaries'''
+ a = {"key1" : 1}
+ b = {"key1" : 2}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2]})
+
+ def test_merge_b_empty(self):
+ '''Testing a merge of an emtpy dictionary'''
+ a = {"key1" : 1}
+ b = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_a_empty(self):
+ '''Testing a merge of an emtpy dictionary'''
+ b = {"key1" : 1}
+ a = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_hash_array(self):
+ '''Testing a merge of a dictionary and a dictionary with an array'''
+ a = {"key1" : {"hasha": 1}}
+ b = {"key1" : [1, 2]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+ def test_merge_array_hash(self):
+ '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+ a = {"key1" : [1, 2]}
+ b = {"key1" : {"hasha": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+ def test_merge_keys_1(self):
+ '''Testing a merge on a dictionary for keys'''
+ a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+ b = {"key2" : {"hashb": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+ def test_merge_recursive_1(self):
+ '''Testing a recursive merge'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_array_item(self):
+ '''Testing a recursive merge for an array'''
+ a = {"a" : {"b": {"c": [1]}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_hash_item(self):
+ '''Testing a recursive merge for a hash'''
+ a = {"a" : {"b": {"c": {"d": 1}}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+ def test_merge_recursive_array_hash(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : [{"b": {"c": 1}}]}
+ b = {"a" : {"b": {"c": 1}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+ def test_merge_recursive_hash_array(self):
+ '''Testing a recursive merge for an array and a hash'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : [{"b": {"c": 1}}]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+# def tearDown(self):
+# '''TearDown method'''
+# pass
+
+if __name__ == "__main__":
+ unittest.main()
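
The tests above pin down merge_destructively's behaviour. A standalone sketch consistent with those expectations (not the patch's exact implementation, which is only partially visible in the hunk earlier in this diff):

    def merge_destructively(a, b):
        """Merge b into a: recurse into dicts, keep equal leaves,
        and combine conflicting leaves into a list."""
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge_destructively(a[key], b[key])
                elif a[key] == b[key]:
                    pass                                  # same leaf value
                elif isinstance(a[key], list) and isinstance(b[key], list):
                    a[key].extend(x for x in b[key] if x not in a[key])
                elif isinstance(a[key], list):
                    if b[key] not in a[key]:
                        a[key].append(b[key])
                elif isinstance(b[key], list):
                    a[key] = [a[key]] + [x for x in b[key] if x != a[key]]
                else:
                    a[key] = [a[key], b[key]]
            else:
                a[key] = b[key]
        return a

    result = {}
    for part in ({"key1": 1}, {"key1": 2}):
        merge_destructively(result, part)
    print(result)   # {'key1': [1, 2]}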
diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py
deleted file mode 100755
index 95df93cd2..000000000
--- a/test/units/mutli_ec2_test.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_merge_simple_1(self):
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2]})
-
- def test_merge_b_empty(self):
- a = {"key1" : 1}
- b = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- b = {"key1" : 1}
- a = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1,2]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
- def test_merge_array_hash(self):
- a = {"key1" : [1,2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- a = {"key1" : [1,2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_array_item(self):
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_hash_item(self):
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def tearDown(self):
- pass
-
-if __name__ == "__main__":
- unittest.main()