author     Suren A. Chilingaryan <csa@suren.me>  2018-03-20 15:47:51 +0100
committer  Suren A. Chilingaryan <csa@suren.me>  2018-03-20 15:47:51 +0100
commit     e2c7b1305ca8495065dcf40fd2092d7c698dd6ea (patch)
tree       abcaa7006a9c4b7a9add9bd0bf8c24f7f8ce048f
parent     47f350bc3aa85a8bd406d95faf084df2abf74ae9 (diff)
Local volumes and StatefulSet to provision Master/Slave MySQL and Galera cluster
m---------  anslib/openshift-ansible                                                      0
-rw-r--r--  anslib/patches/openshift/openshift-ds-update371.patch                         6
-rw-r--r--  docs/README                                                                  91
-rw-r--r--  docs/benchmarks/netpipe-hostnet-connect2clusterip.txt (renamed from docs/benchmarks/netpipe-hostnet-clusterip.txt)  0
-rw-r--r--  docs/benchmarks/netpipe-hostnet-connect2hostip.txt (renamed from docs/benchmarks/netpipe-hostnet-hostip.txt)  0
-rw-r--r--  docs/benchmarks/netpipe-pod2host-ownhost.txt                                124
-rw-r--r--  docs/benchmarks/netpipe-pod2host.txt                                        124
-rw-r--r--  docs/benchmarks/netpipe-pod2pod-clusternet2hostnet.txt                      124
-rw-r--r--  docs/configs.txt                                                              3
-rw-r--r--  docs/databases.txt                                                           62
-rw-r--r--  docs/info.txt                                                                31
-rw-r--r--  docs/infrastructure.txt                                                     110
-rw-r--r--  docs/links.txt                                                               16
-rw-r--r--  docs/managment.txt                                                            8
-rw-r--r--  docs/network.txt                                                              9
-rw-r--r--  docs/performance.txt                                                         54
-rw-r--r--  docs/vagrant.txt                                                              4
-rw-r--r--  group_vars/OSEv3.yml                                                         10
-rw-r--r--  group_vars/baremetal.yml                                                      1
-rw-r--r--  playbooks/openshift-setup-project.yml                                         1
-rw-r--r--  playbooks/openshift-setup-projects.yml                                        1
-rw-r--r--  roles/ands_facts/tasks/main.yml                                               8
-rw-r--r--  roles/ands_facts/tasks/node.yml                                               5
-rw-r--r--  roles/ands_facts/tasks/nodes.yml                                             10
-rw-r--r--  roles/ands_facts/tasks/volume.yml                                             8
-rw-r--r--  roles/ands_facts/tasks/volumes.yml                                            7
-rw-r--r--  roles/ands_facts/tasks/volumes_iterate.yml                                   13
-rw-r--r--  roles/ands_kaas/00-local-volumes.yml.j2                                      67
-rw-r--r--  roles/ands_kaas/tasks/do_apps.yml                                            18
-rw-r--r--  roles/ands_kaas/tasks/do_keys.yml                                             6
-rw-r--r--  roles/ands_kaas/tasks/do_project.yml                                         77
-rw-r--r--  roles/ands_kaas/tasks/do_storage.yml                                         14
-rw-r--r--  roles/ands_kaas/tasks/ocitem.yml                                             28
-rw-r--r--  roles/ands_kaas/tasks/project.yml                                            17
-rw-r--r--  roles/ands_kaas/tasks/template.yml                                           11
-rw-r--r--  roles/ands_kaas/templates/00-local-volumes.yml.j2                            50
-rw-r--r--  roles/ands_kaas/templates/50-kaas-pods.yml.j2                               111
-rw-r--r--  roles/ands_network/defaults/main.yml                                          2
-rw-r--r--  roles/ands_network/files/galera.xml                                          10
-rw-r--r--  roles/ands_network/files/netpipe.xml                                          6
-rw-r--r--  roles/ands_network/tasks/add_names.yml                                       28
-rw-r--r--  roles/ands_network/tasks/common.yml                                           1
-rw-r--r--  roles/ands_network/tasks/firewall.yml                                        32
-rw-r--r--  roles/ands_network/tasks/firewall_service.yml                                13
-rw-r--r--  roles/ands_network/tasks/nm_configure.yml                                    15
-rw-r--r--  roles/ands_network/tasks/nm_configure_connection.yml                         31
-rw-r--r--  roles/openshift_resource/tasks/template.yml                                   9
-rwxr-xr-x  setup.sh                                                                      2
-rw-r--r--  setup/configs/volumes.yml                                                     4
-rw-r--r--  setup/projects/adei/templates/40-mysql-svc.yml.j2                            16
-rw-r--r--  setup/projects/adei/templates/40-mysql-svc.yml.j2~                            0
-rw-r--r--  setup/projects/adei/vars/apps.yml                                             5
-rw-r--r--  setup/projects/adei/vars/globals.yml                                         16
-rw-r--r--  setup/projects/adei/vars/mysql.yml                                           92
-rw-r--r--  setup/projects/adei/vars/mysql_galera.yml (renamed from setup/projects/adei/vars/galera.yml)  33
-rw-r--r--  setup/projects/adei/vars/mysql_simple.yml                                    26
-rw-r--r--  setup/projects/adei/vars/phpmyadmin.yml                                      16
-rw-r--r--  setup/projects/adei/vars/pods.yml                                            55
-rw-r--r--  setup/projects/adei/vars/script.yml                                           8
-rw-r--r--  setup/projects/adei/vars/volumes.yml                                         24
60 files changed, 1448 insertions, 225 deletions
diff --git a/anslib/openshift-ansible b/anslib/openshift-ansible
-Subproject 974da273128f43a564967716a7386b59f883254
+Subproject f9eff9e52638563f6ff75a8b3b1c07ebab80e12
diff --git a/anslib/patches/openshift/openshift-ds-update371.patch b/anslib/patches/openshift/openshift-ds-update371.patch
index a6beff3..6bbb45d 100644
--- a/anslib/patches/openshift/openshift-ds-update371.patch
+++ b/anslib/patches/openshift/openshift-ds-update371.patch
@@ -7,7 +7,7 @@ index cc2ec27..6c4ccf8 100644
tasks:
- set_fact:
- openshift_upgrade_target: '3.7'
-+ openshift_upgrade_target: '3.7.1'
++ openshift_upgrade_target: '3.7.2'
openshift_upgrade_min: '3.6'
- import_playbook: ../pre/config.yml
@@ -17,9 +17,9 @@ index 0000000..10b49c0
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin371.repo.j2
@@ -0,0 +1,26 @@
-+[centos-openshift-origin371]
++[centos-openshift-origin37x]
+name=CentOS OpenShift Origin
-+baseurl={{ ands_repo_url }}/openshift371/
++baseurl={{ ands_repo_url }}/openshift37/
+enabled=1
+gpgcheck=0
+
diff --git a/docs/README b/docs/README
new file mode 100644
index 0000000..4f75b5b
--- /dev/null
+++ b/docs/README
@@ -0,0 +1,91 @@
+OpenShift Platform
+------------------
+The OpenShift web frontend is running at
+ https://kaas.kit.edu:8443
+
+However, I find it simpler to use the command-line tool 'oc':
+ - On RedHat platforms the package is called 'origin-clients' and is installed
+ from the OpenShift repository, available via the package 'centos-release-openshift-origin'.
+ - For other distributions, check here (we are running version 3.7):
+ https://docs.openshift.org/latest/cli_reference/get_started_cli.html#installing-the-cli
+
+This is also good documentation to get started:
+ https://docs.openshift.com/container-platform/3.7/dev_guide/index.html
+
+Infrastructure
+--------------
+ - We have 3 servers running with the names ipekatrin[1-3].ipe.kit.edu. These are internal names. External
+   access is provided using 2 virtual ping-pong IPs, katrin[1-2].ipe.kit.edu. By default they are assigned
+   to the two master servers of the cluster, but both will migrate to the single surviving server if one of the
+   masters dies. This is handled by the keepalived daemon and ensures load balancing and high availability.
+   The domain name 'kaas.kit.edu' resolves to both IPs in round-robin fashion.
+
+ - By default, the running services have names of the form '<service-name>.kaas.kit.edu'. For instance,
+   you can test
+     adei-katrin.kaas.kit.edu - The ADEI service running on the new platform
+     adas-autogen.kaas.kit.edu - A sample ADEI instance with generated data
+     katrin.kaas.kit.edu - The placeholder for the future katrin router
+     etc.
+
+ - The OpenVPN connection to the KATRIN virtual network is running on the master servers. Non-masters route
+   the traffic through the masters using the keepalived IP. So, the katrin network should be transparently
+   visible from any pod in the cluster.
+
+Users
+-----
+ I have configured a few user accounts using ADEI and UFO passwords. Furthermore, to avoid a mess of
+containers, I have created a number of projects with appropriate administrators.
+ kaas (csa, kopmann) - This is a routing service (basically Apache mod_rewrite) to set redirects from http://katrin.kit.edu/*
+ katrin (katrin) - Katrin database
+ adei (csa) - All ADEI setups
+ bora (ntj) - BORA
+ web (kopmann) - Various web sites, etc.
+ mon (csa) - Monitoring
+ test (*) - Project for testing
+
+If needed, I can create more projects/users. Just let me know.
+
+Storage
+-------
+ I have created a couple of gluster volumes for different purposes:
+    katrin_data - For katrin data files
+    datastore - Other non-katrin large data files
+    openshift - 3 times replicated volume for configuration, sources, and other important small files
+    temporary - Logs, temporary files, etc.
+
+ Again, to avoid mixing data from the different projects, each volume has subfolders for all projects. Furthermore,
+ I have tried to add a bit of protection and assigned each project a range of group ids. The subfolders can only be read
+ by the appropriate group. I also pre-created the corresponding PersistentVolumes (pv) and PersistentVolumeClaims (pvc): 'katrin', 'data', ...
+
+ There is a special pvc called 'host'. This is to save data on the local raid array bypassing gluster (i.e. on each OpenShift node
+ the content of the folder will be different).
+
+ WARNING: Gluster supports dynamic provisioning using Heketi. It is installed and working. However, heketi is far from
+ production quality. I think it is OK to use it for some temporary data if you want, but I would suggest using pre-created
+ volumes for important data.
+
+ - Currently, I don't plan to provide access to the servers themselves. The storage should be managed solely from the
+   OpenShift pods. I made a sample 'manager' pod equipped with scp, lftp, curl, etc. It mounts all the default storage.
+   You need to start it, and then you can connect interactively using either the web interface or the console app:
+    oc -n katrin scale dc/kaas-manager --replicas 1
+    oc -n katrin rsh dc/kaas-manager
+   This is just an example; build your own configuration with the required set of packages.
+
+Databases
+---------
+ Gluster works fine if you mostly read data or if you perform mostly sequential writes. It plays very badly with databases and similar
+ loads. I guess this should not be an issue for the Katrin database, as it is relatively small (AFAIK) and does not perform many writes.
+ For something like ADEI, Gluster is not a viable option to back the MySQL server. There are several options to handle volumes for
+ applications performing a large amount of small random writes:
+ - If High Availability (HA) is not important, just pin the pod to a certain node and use the 'host' pvc (see the sketch after this list).
+ - For databases, Master/Slave replication can be enabled (you will still need to pin the pods to nodes and use the 'host' pvc).
+ Alternatively, a Galera cluster can be installed for multi-master replication. It is configured using the StatefulSet feature of
+ OpenShift. I have not tested recovery thoroughly, but it is working, quite performant, and the masters are synchronized without problems.
+ - For non-database applications, the Gluster block storage may be used. The block storage is not shared between multiple pods, but is
+ private to a specific pod. This avoids a certain amount of locking and context switches, so performance is significantly better. I was
+ even able to run the ADEI database on top of such a device, though it is still significantly slower than native host performance. There
+ is again a heketi-based provisioner, but it works even worse than the one provisioning standard Gluster volumes. So, please ask me to
+ create block devices manually if necessary.
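+
+ A minimal sketch of such pinning in a pod/dc spec (the pvc 'host' exists as described above; the node and volume names are illustrative):
+   spec:
+     nodeSelector:
+       kubernetes.io/hostname: ipekatrin1.ipe.kit.edu
+     volumes:
+     - name: data
+       persistentVolumeClaim:
+         claimName: host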
+
+ Overall, if you have a data-intensive workload, we can discuss the best approach.
+ \ No newline at end of file
diff --git a/docs/benchmarks/netpipe-hostnet-clusterip.txt b/docs/benchmarks/netpipe-hostnet-connect2clusterip.txt
index 452a59b..452a59b 100644
--- a/docs/benchmarks/netpipe-hostnet-clusterip.txt
+++ b/docs/benchmarks/netpipe-hostnet-connect2clusterip.txt
diff --git a/docs/benchmarks/netpipe-hostnet-hostip.txt b/docs/benchmarks/netpipe-hostnet-connect2hostip.txt
index 494289d..494289d 100644
--- a/docs/benchmarks/netpipe-hostnet-hostip.txt
+++ b/docs/benchmarks/netpipe-hostnet-connect2hostip.txt
diff --git a/docs/benchmarks/netpipe-pod2host-ownhost.txt b/docs/benchmarks/netpipe-pod2host-ownhost.txt
new file mode 100644
index 0000000..d49e340
--- /dev/null
+++ b/docs/benchmarks/netpipe-pod2host-ownhost.txt
@@ -0,0 +1,124 @@
+ 1 0.657660 0.00001160
+ 2 1.261032 0.00001210
+ 3 1.993782 0.00001148
+ 4 2.558493 0.00001193
+ 6 3.738559 0.00001224
+ 8 5.187374 0.00001177
+ 12 7.518725 0.00001218
+ 13 8.196018 0.00001210
+ 16 10.198276 0.00001197
+ 19 12.731722 0.00001139
+ 21 13.796771 0.00001161
+ 24 15.639661 0.00001171
+ 27 16.674961 0.00001235
+ 29 18.053967 0.00001226
+ 32 19.608304 0.00001245
+ 35 21.442525 0.00001245
+ 45 28.959251 0.00001186
+ 48 32.887048 0.00001114
+ 51 32.061613 0.00001214
+ 61 36.213737 0.00001285
+ 64 41.694947 0.00001171
+ 67 43.056353 0.00001187
+ 93 57.911407 0.00001225
+ 96 59.014557 0.00001241
+ 99 61.014493 0.00001238
+ 125 76.480392 0.00001247
+ 128 78.037922 0.00001251
+ 131 83.034218 0.00001204
+ 189 119.373976 0.00001208
+ 192 124.676617 0.00001175
+ 195 122.816000 0.00001211
+ 253 153.867359 0.00001254
+ 256 156.339712 0.00001249
+ 259 160.935138 0.00001228
+ 381 236.756142 0.00001228
+ 384 233.925416 0.00001252
+ 387 260.149962 0.00001135
+ 509 323.528140 0.00001200
+ 512 307.977445 0.00001268
+ 515 313.039822 0.00001255
+ 765 481.796856 0.00001211
+ 768 481.708998 0.00001216
+ 771 486.042697 0.00001210
+ 1021 645.735673 0.00001206
+ 1024 633.657979 0.00001233
+ 1027 636.196119 0.00001232
+ 1533 938.614280 0.00001246
+ 1536 869.867765 0.00001347
+ 1539 930.918606 0.00001261
+ 2045 802.337366 0.00001945
+ 2048 807.090888 0.00001936
+ 2051 772.193892 0.00002026
+ 3069 1170.196266 0.00002001
+ 3072 1219.905239 0.00001921
+ 3075 1174.839492 0.00001997
+ 4093 1464.200824 0.00002133
+ 4096 1566.409830 0.00001995
+ 4099 1512.849103 0.00002067
+ 6141 2286.945465 0.00002049
+ 6144 2288.367849 0.00002048
+ 6147 2207.269593 0.00002125
+ 8189 2923.440347 0.00002137
+ 8192 2941.778132 0.00002125
+ 8195 2904.909553 0.00002152
+ 12285 4165.044677 0.00002250
+ 12288 4039.677896 0.00002321
+ 12291 4252.381651 0.00002205
+ 16381 5656.041761 0.00002210
+ 16384 5614.844855 0.00002226
+ 16387 5382.844899 0.00002323
+ 24573 7719.968664 0.00002428
+ 24576 7414.582998 0.00002529
+ 24579 7817.458860 0.00002399
+ 32765 9775.709423 0.00002557
+ 32768 9442.714388 0.00002648
+ 32771 9770.830694 0.00002559
+ 49149 12826.142657 0.00002924
+ 49152 12626.640048 0.00002970
+ 49155 12477.873858 0.00003006
+ 65533 10571.284885 0.00004730
+ 65536 10570.211691 0.00004730
+ 65539 10084.322046 0.00004958
+ 98301 14402.304952 0.00005207
+ 98304 14642.413170 0.00005122
+ 98307 13935.428925 0.00005382
+ 131069 15142.825700 0.00006604
+ 131072 15790.566346 0.00006333
+ 131075 15281.133509 0.00006544
+ 196605 16728.089456 0.00008967
+ 196608 17013.589640 0.00008816
+ 196611 16828.700106 0.00008913
+ 262141 18961.512211 0.00010548
+ 262144 18106.104774 0.00011046
+ 262147 18399.271190 0.00010870
+ 393213 20323.824073 0.00014761
+ 393216 20333.341617 0.00014754
+ 393219 20240.703379 0.00014822
+ 524285 21133.676444 0.00018927
+ 524288 21646.010507 0.00018479
+ 524291 20236.384690 0.00019766
+ 786429 22905.368103 0.00026195
+ 786432 24588.762530 0.00024401
+ 786435 23509.571645 0.00025522
+ 1048573 24178.948658 0.00033087
+ 1048576 23115.860503 0.00034608
+ 1048579 24086.879088 0.00033213
+ 1572861 24694.199111 0.00048594
+ 1572864 26163.572804 0.00045865
+ 1572867 26574.262072 0.00045157
+ 2097149 25834.680617 0.00061932
+ 2097152 27875.424112 0.00057398
+ 2097155 28140.234242 0.00056858
+ 3145725 28596.052642 0.00083928
+ 3145728 28682.984164 0.00083673
+ 3145731 23866.770612 0.00100558
+ 4194301 26493.728895 0.00120783
+ 4194304 27059.094386 0.00118260
+ 4194307 24088.422769 0.00132844
+ 6291453 24962.945897 0.00192285
+ 6291456 27125.867373 0.00176953
+ 6291459 26348.212014 0.00182176
+ 8388605 25510.943520 0.00250873
+ 8388608 24568.383940 0.00260497
+ 8388611 26392.930637 0.00242489
diff --git a/docs/benchmarks/netpipe-pod2host.txt b/docs/benchmarks/netpipe-pod2host.txt
new file mode 100644
index 0000000..4d18f48
--- /dev/null
+++ b/docs/benchmarks/netpipe-pod2host.txt
@@ -0,0 +1,124 @@
+ 1 0.420894 0.00001813
+ 2 0.796803 0.00001915
+ 3 1.211204 0.00001890
+ 4 1.649525 0.00001850
+ 6 2.424251 0.00001888
+ 8 3.125162 0.00001953
+ 12 4.826660 0.00001897
+ 13 5.124437 0.00001935
+ 16 6.351908 0.00001922
+ 19 8.006427 0.00001811
+ 21 8.302512 0.00001930
+ 24 9.368963 0.00001954
+ 27 10.343990 0.00001991
+ 29 11.566140 0.00001913
+ 32 12.787361 0.00001909
+ 35 14.036485 0.00001902
+ 45 17.719754 0.00001938
+ 48 19.013013 0.00001926
+ 51 20.357754 0.00001911
+ 61 23.945906 0.00001944
+ 64 24.833939 0.00001966
+ 67 27.664993 0.00001848
+ 93 35.644683 0.00001991
+ 96 37.158695 0.00001971
+ 99 39.587251 0.00001908
+ 125 51.079576 0.00001867
+ 128 50.130411 0.00001948
+ 131 51.457388 0.00001942
+ 189 75.380130 0.00001913
+ 192 76.342438 0.00001919
+ 195 79.420945 0.00001873
+ 253 105.186533 0.00001835
+ 256 106.372298 0.00001836
+ 259 106.029274 0.00001864
+ 381 151.168028 0.00001923
+ 384 152.753542 0.00001918
+ 387 159.197729 0.00001855
+ 509 207.619883 0.00001870
+ 512 208.712379 0.00001872
+ 515 209.517685 0.00001875
+ 765 314.054051 0.00001858
+ 768 299.822502 0.00001954
+ 771 287.433917 0.00002046
+ 1021 397.548577 0.00001959
+ 1024 408.368406 0.00001913
+ 1027 420.775950 0.00001862
+ 1533 583.916264 0.00002003
+ 1536 572.817784 0.00002046
+ 1539 580.157425 0.00002024
+ 2045 499.904926 0.00003121
+ 2048 507.540741 0.00003079
+ 2051 524.322916 0.00002984
+ 3069 770.975516 0.00003037
+ 3072 746.563147 0.00003139
+ 3075 789.196027 0.00002973
+ 4093 1005.896826 0.00003104
+ 4096 1001.910613 0.00003119
+ 4099 1066.934103 0.00002931
+ 6141 1354.959563 0.00003458
+ 6144 1390.924953 0.00003370
+ 6147 1469.109813 0.00003192
+ 8189 1849.608991 0.00003378
+ 8192 1839.807660 0.00003397
+ 8195 1856.771767 0.00003367
+ 12285 2577.255660 0.00003637
+ 12288 2559.043820 0.00003663
+ 12291 2595.115904 0.00003613
+ 16381 3310.384291 0.00003775
+ 16384 3202.585996 0.00003903
+ 16387 3410.545389 0.00003666
+ 24573 4461.016945 0.00004203
+ 24576 4183.724225 0.00004482
+ 24579 4243.889480 0.00004419
+ 32765 5288.958972 0.00004726
+ 32768 5328.798686 0.00004691
+ 32771 5277.353091 0.00004738
+ 49149 6339.504613 0.00005915
+ 49152 6402.924842 0.00005857
+ 49155 6480.738141 0.00005787
+ 65533 4709.518059 0.00010616
+ 65536 4613.364349 0.00010838
+ 65539 4932.498325 0.00010137
+ 98301 6066.768938 0.00012362
+ 98304 5998.359888 0.00012503
+ 98307 6098.265480 0.00012299
+ 131069 6117.938429 0.00016345
+ 131072 6324.061201 0.00015813
+ 131075 6285.324621 0.00015910
+ 196605 6557.328589 0.00022875
+ 196608 8022.864625 0.00018697
+ 196611 8524.213528 0.00017597
+ 262141 8846.468887 0.00022608
+ 262144 8678.411984 0.00023046
+ 262147 8237.604968 0.00024279
+ 393213 11383.947500 0.00026353
+ 393216 11671.364535 0.00025704
+ 393219 12134.274110 0.00024724
+ 524285 10564.415738 0.00037863
+ 524288 10541.035553 0.00037947
+ 524291 12139.945493 0.00032949
+ 786429 13031.143983 0.00046043
+ 786432 13255.902187 0.00045263
+ 786435 13528.481196 0.00044351
+ 1048573 12102.584918 0.00066101
+ 1048576 11365.676465 0.00070387
+ 1048579 13355.335488 0.00059901
+ 1572861 11314.688623 0.00106057
+ 1572864 14604.826569 0.00082165
+ 1572867 11649.668141 0.00103007
+ 2097149 8779.830027 0.00182236
+ 2097152 12092.373128 0.00132315
+ 2097155 10640.598403 0.00150368
+ 3145725 11399.940287 0.00210527
+ 3145728 11327.829590 0.00211868
+ 3145731 12330.448131 0.00194640
+ 4194301 10110.197684 0.00316512
+ 4194304 1853.739580 0.01726240
+ 4194307 8969.381449 0.00356770
+ 6291453 10336.475983 0.00464375
+ 6291456 10847.818034 0.00442485
+ 6291459 12336.471285 0.00389090
+ 8388605 13628.581728 0.00469601
+ 8388608 10168.895623 0.00629370
+ 8388611 13799.286656 0.00463792
diff --git a/docs/benchmarks/netpipe-pod2pod-clusternet2hostnet.txt b/docs/benchmarks/netpipe-pod2pod-clusternet2hostnet.txt
new file mode 100644
index 0000000..4d18f48
--- /dev/null
+++ b/docs/benchmarks/netpipe-pod2pod-clusternet2hostnet.txt
@@ -0,0 +1,124 @@
+ 1 0.420894 0.00001813
+ 2 0.796803 0.00001915
+ 3 1.211204 0.00001890
+ 4 1.649525 0.00001850
+ 6 2.424251 0.00001888
+ 8 3.125162 0.00001953
+ 12 4.826660 0.00001897
+ 13 5.124437 0.00001935
+ 16 6.351908 0.00001922
+ 19 8.006427 0.00001811
+ 21 8.302512 0.00001930
+ 24 9.368963 0.00001954
+ 27 10.343990 0.00001991
+ 29 11.566140 0.00001913
+ 32 12.787361 0.00001909
+ 35 14.036485 0.00001902
+ 45 17.719754 0.00001938
+ 48 19.013013 0.00001926
+ 51 20.357754 0.00001911
+ 61 23.945906 0.00001944
+ 64 24.833939 0.00001966
+ 67 27.664993 0.00001848
+ 93 35.644683 0.00001991
+ 96 37.158695 0.00001971
+ 99 39.587251 0.00001908
+ 125 51.079576 0.00001867
+ 128 50.130411 0.00001948
+ 131 51.457388 0.00001942
+ 189 75.380130 0.00001913
+ 192 76.342438 0.00001919
+ 195 79.420945 0.00001873
+ 253 105.186533 0.00001835
+ 256 106.372298 0.00001836
+ 259 106.029274 0.00001864
+ 381 151.168028 0.00001923
+ 384 152.753542 0.00001918
+ 387 159.197729 0.00001855
+ 509 207.619883 0.00001870
+ 512 208.712379 0.00001872
+ 515 209.517685 0.00001875
+ 765 314.054051 0.00001858
+ 768 299.822502 0.00001954
+ 771 287.433917 0.00002046
+ 1021 397.548577 0.00001959
+ 1024 408.368406 0.00001913
+ 1027 420.775950 0.00001862
+ 1533 583.916264 0.00002003
+ 1536 572.817784 0.00002046
+ 1539 580.157425 0.00002024
+ 2045 499.904926 0.00003121
+ 2048 507.540741 0.00003079
+ 2051 524.322916 0.00002984
+ 3069 770.975516 0.00003037
+ 3072 746.563147 0.00003139
+ 3075 789.196027 0.00002973
+ 4093 1005.896826 0.00003104
+ 4096 1001.910613 0.00003119
+ 4099 1066.934103 0.00002931
+ 6141 1354.959563 0.00003458
+ 6144 1390.924953 0.00003370
+ 6147 1469.109813 0.00003192
+ 8189 1849.608991 0.00003378
+ 8192 1839.807660 0.00003397
+ 8195 1856.771767 0.00003367
+ 12285 2577.255660 0.00003637
+ 12288 2559.043820 0.00003663
+ 12291 2595.115904 0.00003613
+ 16381 3310.384291 0.00003775
+ 16384 3202.585996 0.00003903
+ 16387 3410.545389 0.00003666
+ 24573 4461.016945 0.00004203
+ 24576 4183.724225 0.00004482
+ 24579 4243.889480 0.00004419
+ 32765 5288.958972 0.00004726
+ 32768 5328.798686 0.00004691
+ 32771 5277.353091 0.00004738
+ 49149 6339.504613 0.00005915
+ 49152 6402.924842 0.00005857
+ 49155 6480.738141 0.00005787
+ 65533 4709.518059 0.00010616
+ 65536 4613.364349 0.00010838
+ 65539 4932.498325 0.00010137
+ 98301 6066.768938 0.00012362
+ 98304 5998.359888 0.00012503
+ 98307 6098.265480 0.00012299
+ 131069 6117.938429 0.00016345
+ 131072 6324.061201 0.00015813
+ 131075 6285.324621 0.00015910
+ 196605 6557.328589 0.00022875
+ 196608 8022.864625 0.00018697
+ 196611 8524.213528 0.00017597
+ 262141 8846.468887 0.00022608
+ 262144 8678.411984 0.00023046
+ 262147 8237.604968 0.00024279
+ 393213 11383.947500 0.00026353
+ 393216 11671.364535 0.00025704
+ 393219 12134.274110 0.00024724
+ 524285 10564.415738 0.00037863
+ 524288 10541.035553 0.00037947
+ 524291 12139.945493 0.00032949
+ 786429 13031.143983 0.00046043
+ 786432 13255.902187 0.00045263
+ 786435 13528.481196 0.00044351
+ 1048573 12102.584918 0.00066101
+ 1048576 11365.676465 0.00070387
+ 1048579 13355.335488 0.00059901
+ 1572861 11314.688623 0.00106057
+ 1572864 14604.826569 0.00082165
+ 1572867 11649.668141 0.00103007
+ 2097149 8779.830027 0.00182236
+ 2097152 12092.373128 0.00132315
+ 2097155 10640.598403 0.00150368
+ 3145725 11399.940287 0.00210527
+ 3145728 11327.829590 0.00211868
+ 3145731 12330.448131 0.00194640
+ 4194301 10110.197684 0.00316512
+ 4194304 1853.739580 0.01726240
+ 4194307 8969.381449 0.00356770
+ 6291453 10336.475983 0.00464375
+ 6291456 10847.818034 0.00442485
+ 6291459 12336.471285 0.00389090
+ 8388605 13628.581728 0.00469601
+ 8388608 10168.895623 0.00629370
+ 8388611 13799.286656 0.00463792
diff --git a/docs/configs.txt b/docs/configs.txt
new file mode 100644
index 0000000..df8eeda
--- /dev/null
+++ b/docs/configs.txt
@@ -0,0 +1,3 @@
+- GlusterFS Stripe size
+ For RAID 6, the stripe unit size must be chosen such that the full stripe size (stripe unit * number of data disks) is between 1 MiB and 2 MiB, preferably in the lower end of the range.
+ Hardware RAID controllers usually allow stripe unit sizes that are a power of 2. For RAID 6 with 12 disks (10 data disks), the recommended stripe unit size is 128KiB.
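+ (Worked example: 128 KiB stripe unit x 10 data disks = 1280 KiB = 1.25 MiB full stripe, which is in the lower end of the 1-2 MiB range.)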
diff --git a/docs/databases.txt b/docs/databases.txt
index 254674e..331313b 100644
--- a/docs/databases.txt
+++ b/docs/databases.txt
@@ -7,8 +7,9 @@
Gluster MyISAM (no logs) 1 MB/s unusable 150% 600-800% Perfect. But too slow (up to completely unusable if bin-logs are on). Slow MyISAM recovery!
Gluster/Block MyISAM (no logs) 5 MB/s slow, but OK 200% ~ 50% No problems on reboot, but requires manual work if node crashes to detach volume.
Galera INNODB 3.5 MB/s fast 3 x 200% - Should be perfect, but I am not sure about automatic recovery...
- MySQL Slaves INNODB 6-8 exp. fast Available data is HA, but caching is not. We can easily turn the slave to master.
- DRBD MyISAM (no logs) 4-6 exp. ? I expect it as an faster option, but does not fit complete concept.
+ Galera/Hostnet INNODB 4.6 MB/s fast 3 x 200% -
+ MySQL Slaves INNODB 5-8 MB/s fast 2 x 250% - Available data is HA, but caching is not. We can easily turn a slave into the master.
+ DRBD MyISAM (no logs) 4-6 exp. ? I expect it to be a faster option, but it does not fit the OpenShift concept that well.
Gluster is way too slow for anything. If a node crashes, MyISAM tables may be left in a corrupted state. The recovery will take ages to complete.
@@ -29,9 +30,13 @@ So, there is no realy a full HA capable solution at the moment. The most reasona
(i.e. status displays), current data is available. And we can easily switch the master if necessary.
The other reasonable options have some problems at the moment and can't be used.
- - Galera. Is a fine solution, but would need some degree of initial maintenance to work stabily. Furthermore, the caching is quite slow. And the
- resync is a big issue.
- - Gluster/Block would be a good solution if volume detachment is fixed. As it stands, we don't have HA without manual intervention. Furthermore, the
+ - Galera. Is a fine solution. The caching is still quite slow. If the networking problem is solved (see the performance section
+ in network.txt) or host networking is used, it is more or less on par with Gluster/Block, but provides much better service to the
+ data-reading clients. However, extra investigation is required to understand the robustness of crash recovery. In some cases,
+ after a crash, Galera was performing a full resync of all data (but I was re-creating the statefulset, which is not a recommended
+ practice; I am not sure if this happens if the software is maintained properly). Also, at some point one of the nodes was not able
+ to join back (even after re-initializing from scratch), but again this hopefully does not happen if the service is not periodically recreated.
+ - Gluster/Block would be a good solution if volume detachment is fixed. As it stands, we don't have HA without manual intervention. Furthermore, the
MyISAM recovery is quite slow.
- HostMount will use our 3-node storage optimally. But if something crashes, it will take about a week to re-cache the data.
@@ -80,16 +85,21 @@ Galera
* If all nodes crashed, then again one node should restart the cluster and others join
later. For older versions, it is necessary to run mysqld with '--wsrep-new-cluster'.
The new version tries to automate it and will recover automatically if 'safe_to_bootstrap' = 1
- in 'grstate.dat' in mysql data folder. It should be set by Galera based on some heuristic,
- but in fact I always had to set it manually. IMIMPORTANT, it should be set only on one of
- the nodes.
-
- - Synchrinization only works for INNODB tables. Furthermore, binary logging should be turned
- on (yes, it is possible to turn it off and there is no complains, but only the table names are
- synchronized, no data is pushed between the nodes).
+ in 'grastate.dat' in the mysql data folder. If the cluster was shut down orderly, Galera will
+ set it automatically on the last node to stop the service. In case of a crash, however,
+ it has to be configured manually on the most up-to-date node. IMPORTANT: it should be
+ set only on one of the nodes. Otherwise, the cluster will become nearly unrecoverable.
+ * So, to recover a failed cluster (unless automatic recovery works) we must revert to a manual
+ procedure. There is a 'gmanager' pod which can be scaled to 3 nodes. We recover a full
+ cluster in these pods in the required order. Then, we stop the first node and init a StatefulSet.
+ As the first node in the StatefulSet becomes ready, we stop the second node in 'gmanager', and so on.
+
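+   A sketch of 'grastate.dat' (the field values are illustrative; check your Galera version):
+     # GALERA saved state
+     version: 2.1
+     uuid:    <cluster uuid>
+     seqno:   -1
+     safe_to_bootstrap: 1
+   It should read 'safe_to_bootstrap: 1' on the most up-to-date node only; start that node first.
+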
+ - IMPORTANT: Synchronization only works for INNODB tables. Furthermore, binary logging should
+ be turned on (yes, it is possible to turn it off and there are no complaints, but only the table
+ names are synchronized; no data is pushed between the nodes).
- OpenShift uses 'StatefulSet' to perform such initialization. Particularly, it starts first
- node and waits until it is running before starting next one.
+ node and waits until it is running (and ready) before starting the next one.
* Now the nodes need to talk between each other. The 'headless' service is used for that.
Unlike a standard service, the DNS does not load-balance the service pods, but returns the IPs of
all service members if the appropriate DNS request (SRV) is sent. In the Service spec we specify:
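    A minimal sketch of such a headless Service ('clusterIP: None' is what disables load balancing;
    the selector and ports are illustrative, and the name matches the 'serviceName' used below):
      apiVersion: v1
      kind: Service
      metadata:
        name: adei-ss
      spec:
        clusterIP: None
        selector:
          app: galera
        ports:
        - name: mysql
          port: 3306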
@@ -112,7 +122,33 @@ Galera
serviceName: adei-ss
There are a few other minor differences. For instance, the 'selector' has a more flexible notation
and should include 'matchLabels' before specifying the 'pod' selector, etc.
+
+ - IMPORTANT: If we use hostPath (or even a hostPath-based pv/pvc pair), the pods will be assigned
+ to the nodes randomly. This is not ideal if we want to shut down and restart the cluster. In general,
+ we always want the first pod to end up on the same storage, as it will likely be the one able to
+ bootstrap. Instead, we should use the 'local' volume feature (alpha in OpenShift 3.7; it should be
+ enabled in the origin-node and origin-master configurations). Then, OpenShift binds the 'pvc' to a
+ specific node and the 'pod' is executed on the node where its 'pvc' is bound.
+
+ - IMPORTANT: StatefulSet ensures ordering and local volume data binding. Consequently, we should
+ not destroy the StatefulSet object, which saves the state information. Otherwise, the node assignments
+ will change and the cluster will be hard or even impossible to recover.
+
+ - Another problem of our setup is the slow internal network (since bridging over Infiniband is not
+ possible). One solution to overcome this is to run Galera using 'hostNetwork'. Then, however,
+ the 'peer-finder' fails. It tries to match the service names to its 'hostname', expecting
+ that it will be in the form 'galera-0.galera.adei.svc.cluster.local', but with host networking
+ enabled the actual hostname is used (i.e. ipekatrin1.ipe.kit.edu). I had to patch peer-finder
+ to resolve the names and match on the IPs instead.
- To check current status of the cluster
SHOW STATUS LIKE 'wsrep_cluster_size';
+
+Master/Slave replication
+========================
+ - This configuration seems more robust, but strangely has a lot of performance issues on the
+ slave side. Network is not a problem; the slave is able to get the logs from the master, but it is
+ significantly slower in applying them. The main performance killer is the disk sync operations triggered
+ by 'sync_binlog', INNODB log flushing, etc. Disabling these brings performance to a reasonable level.
+ Still, the master is caching at about 6-8 MB/s and the slave at only 4-5 MB/s.
\ No newline at end of file
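+
+ A sketch of the slave-side settings discussed above (my.cnf; the exact values are an assumption and
+ trade durability for speed):
+   [mysqld]
+   sync_binlog = 0                       # do not fsync the binary log on every commit
+   innodb_flush_log_at_trx_commit = 2    # flush the INNODB log to the OS buffer; fsync ~once per second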
diff --git a/docs/info.txt b/docs/info.txt
new file mode 100644
index 0000000..ea00f58
--- /dev/null
+++ b/docs/info.txt
@@ -0,0 +1,31 @@
+oc -n adei patch dc/mysql --type=json --patch '[{"op": "remove", "path": "/spec/template/spec/nodeSelector"}]'
+oc process -f mysql.yml | oc -n adei replace dc/mysql -f -
+oc -n adei delete --force --grace-period=0 pod mysql-1-m4wcq
+
+We can use rpcbind (and other services) from the host (host networking). We need iscsi initiators;
+rpcbind is taken from the host, but check with telnet. The mother volumes are provisioned 100GiB large,
+so we can't allocate more.
+| grep -oP '^GBID:\s*\K.*'
+
+Top level (nodeSelector, restartPolicy, securityContext)
+ dnsPolicy: ClusterFirstWithHostNet
+ dnsPolicy: ClusterFirst
+ hostNetwork: true
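+A sketch of how these fit together in a pod spec (illustrative):
+  spec:
+    hostNetwork: true
+    dnsPolicy: ClusterFirstWithHostNet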
+oc -n kaas adm policy add-scc-to-user hostnetwork -z default
+Check (the user should appear in the users list):
+oc get scc hostnetwork -o yaml
+firewall-cmd --add-port=5002/tcp
+
+ OnDelete: This is the default update strategy for backward-compatibility. With OnDelete update strategy, after you update a DaemonSet template, new DaemonSet pods will only be created when you manually delete old DaemonSet pods. This is the same behavior of DaemonSet in Kubernetes version 1.5 or before.
+ RollingUpdate: With RollingUpdate update strategy, after you update a DaemonSet template, old DaemonSet pods will be killed, and new DaemonSet pods will be created automatically, in a controlled fashion.
+
+Caveat: Updating DaemonSet created from Kubernetes version 1.5 or before
+.spec.updateStrategy.rollingUpdate.maxUnavailable (default to 1) and .spec.minReadySeconds
+
+
+
+ “Default”: The Pod inherits the name resolution configuration from the node that the pods run on. See related discussion for more details.
+ “ClusterFirst”: Any DNS query that does not match the configured cluster domain suffix, such as “www.kubernetes.io”, is forwarded to the upstream nameserver inherited from the node. Cluster administrators may have extra stub-domain and upstream DNS servers configured. See related discussion for details on how DNS queries are handled in those cases.
+ “ClusterFirstWithHostNet”: For Pods running with hostNetwork, you should explicitly set its DNS policy “ClusterFirstWithHostNet”.
+ “None”: A new option value introduced in Kubernetes v1.9. This Alpha feature allows a Pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the Pod Spec. See DNS config subsection below.
diff --git a/docs/infrastructure.txt b/docs/infrastructure.txt
new file mode 100644
index 0000000..dc6a57e
--- /dev/null
+++ b/docs/infrastructure.txt
@@ -0,0 +1,110 @@
+Networks
+========
+ 192.168.11.0/24 (18-port IB switch): Legacy network, non-production systems including storage
+ 192.168.12.0/24 (12-port IB switch): KATRIN Storage network
+ 192.168.13.0/24 (12-port IB switch): HPC Cloud & Computing network
+ 192.168.26.0/24 (Ethernet): Infrastructure network (OpenShift nodes and everything else)
+ 192.168.16.0/22 External IPs for testing and production
+ 192.168.111.0/24 (OpenVPN): Gateway to Katrin network using Master1 tunnel
+ 192.168.112.0/24 (OpenVPN): Gateway to Katrin network using Master2 tunnel
+
+ 192.168.212.0/24
+ 192.168.213.0/24
+ 192.168.226.0/24 (Ethernet): Staging network (Virtual OpenShift and other nodes)
+ 192.168.216.0/22 External IPs for staging
+ 192.168.221.0/24 (OpenVPN): Gateway to Katrin network using staging Master1 tunnel
+ 192.168.222.0/24 (OpenVPN): Gateway to Katrin network using staging Master2 tunnel
+
+KIT resources
+=============
+ - ipekatrin*.ipe.kit.edu Cluster nodes
+ - ipekatrin[1:2].ipe.kit.edu Master nodes with fixed IPs (one could be dead)
+ + katrin[1:2].ipe.kit.edu Virtual IPs assigned to master nodes (HA)
+ + kaas.kit.edu (katrin.ipe.kit.edu) DNS-based load balancer between katrin[1:2].ipe.kit.edu
+ + *.kaas.kit.edu (*.katrin.ipe.kit.edu) Default application domain?
+ - katrin.kit.edu Apache/mod_proxy pod (In DNS put CN to katrin.ipe.kit.edu)
+
+ + openshift.ipe.kit.edu Gateway (VIPS) to staging cluster (Just one IP migrating between 2 nodes)
+ - *.openshift.ipe.kit.edu Default application domain for staging cluster
+
+Storage
+=======
+ LVM VGs
+ VolGroup00
+ -> LogVol*: System partitions
+ -> docker-pool: Docker storage
+ Katrin
+ -> Heketi PD (we reserve space, but do not configure heketi so far)
+ -> vg_*
+ -> Heketi-managed Gluster Volumes
+ -> Katrin (mounted at '/mnt/ands')
+ -> Space for manually-managed Gluster Bricks
+ -> Storage for Galera / Cassandra / etc.?
+
+ Gluster Volume Types:
+ tmp: distribute ? Various data which should be preserved, but not critical if lost or temporarily inaccessible (logs, etc.) [ check if we can still write if one brick is gone ]
+ cfg: replica=3 Small and critical data sets (configs, sources, etc.)
+ cache: replica+arbiter Large re-generatable data which anyway should be always available [ potentially we can use disperse to save space ]
+ data: replica+arbiter Very large and critical data
+ db: dispersed A few very large files, like large single-table database (ADEI many tables)
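+
+ For example, a volume of the 'cfg' type could presumably be created like this (the brick paths are illustrative):
+   gluster volume create openshift replica 3 \
+     ipekatrin1:/mnt/bricks/openshift ipekatrin2:/mnt/bricks/openshift ipekatrin3:/mnt/bricks/openshift
+   gluster volume start openshift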
+
+ Scaling storage:
+ cfg: 3 nodes is enough
+ cache/data: [d][d][a] => [da][d ][ad][ d] => [d ][d ][ d][ d][aa] => further increase in pairs; at some point add a second arbiter node
+
+ Gluster Volumes:
+ provision cfg /mnt/provision Provisioning volume which is not expected to be mounted in the containers (temporarily may contain secret information, etc.)
+ openshift cfg /mnt/openshift Multi-purpose: Various small size configurations (adei, apache, etc.)
+ temporary tmp /mnt/temporary Multi-purpose: Various logs & temporary files
+ ?adei cfg /mnt/adei/adei
+ adei-db cache /mnt/adei/db
+ adei-tmp tmp /mnt/adei/tmp
+ katrin-mysql data /mnt/katrin/mysql
+ katrin-data cfg /mnt/katrin/archive
+ katrin-kali cache /mnt/katrin/storage
+ katrin-tmp tmp /mnt/katrin/workspace
+
+ OpenShift Volumes:
+ etc cfg/ro openshift Various configurations (ADEI & Apache configs, other stuff in etc.)
+ src cfg/ro openshift Interpreted source files
+ log tmp/rw tmp Stuff in /var/log
+ tmp tmp/rw tmp Various temporary files
+ adei-db data/rw adei-db ADEI cache database and a few primary sources [ will take ages to regenerate, so we can't consider it as dispensable cache really ]
+ adei-tmp tmp/rw adei-tmp ADEI, Apache, and Cron logs [ Technically, we also have downloads here, which are more cache than tmp... but I think it is fine for now ]
+ adei-cfg cfg/ro adei? ADEI & Apache configs
+ adei-src cfg/ro adei? ADEI sources
+ katrin-mysql cfg/rw katrin-mysql KATRIN Database with configurations, etc.
+ katrin-data data/rw katrin-data KATRIN data archives, all primary raw data from Orca, etc.
+ katrin-kali cache/rw katrin-kali Generated ROOT files [ Can we make this separation? Marco uses hardlinks ]
+ katrin-proc tmp/rw katrin-proc Data processing volume (inbox, etc.)
+
+Services
+========
+ - Keepalived
+ - OpenVPN
+ - Gluster
+ - MySQL Galera (?)
+ - Cassandra (?)
+ - oVirt (?)
+ - OpenShift Master / Node
+ - Heketi
+ - Apache Router
+ - ADEI Services
+ - Apache Spark & etc.
+
+Inventories
+===========
+ - staging & production will be operating in parallel (staging in vagrant and production on bare-metal)
+ - testing is just pre-production tests which will be removed once production is running
+
+Labels
+======
+ - We specify whether a node is a master and whether it provides fat storage for glusterfs
+ - All nodes are currently in the 'infra' region (for example, student computers will be non-infra nodes; nodes outside of KIT as well)
+ - The servers in the cellar are in the 'default' zone (if we put something in the 4th floor server room, we would define a new zone there)
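+
+ For example (region/zone labels as described above; the node name is illustrative):
+   oc label node ipekatrin1.ipe.kit.edu region=infra zone=default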
+
+Computing
+=========
+ - Define CUDA nodes and OpenCL nodes
+ - The Intel Xeon Phi is replaced by the new Tesla in ipepdvcompute2
+ - Gen1 UFO servers do not support "Above 64G decoding" and can't run the Xeon Phi. Maybe we can put it in the new Phi server.
diff --git a/docs/links.txt b/docs/links.txt
new file mode 100644
index 0000000..003cffe
--- /dev/null
+++ b/docs/links.txt
@@ -0,0 +1,16 @@
+- PXE boot on second network interface (put a small hub for this purpose) or Mellanox Flex Boot (check)
+ https://github.com/jonschipp/vagrant/tree/master/pxe-multiboot
+ http://www.tecmint.com/multiple-centos-installations-using-kickstart/
+
+- Ovirt
+ https://docs.ansible.com/ansible/ovirt_vms_module.html
+ http://www.ovirt.org/develop/release-management/features/infra/ansible_modules/
+ https://github.com/rhevm-qe-automation/ovirt-ansible
+
+- Galera on OpenShift
+ https://github.com/openshift/origin/tree/master/examples/statefulsets/mysql/galera
+
+- CUDA on OpenShift
+ https://blog.openshift.com/use-gpus-openshift-kubernetes/
+
+
diff --git a/docs/managment.txt b/docs/managment.txt
index 9436c3c..cfc6aff 100644
--- a/docs/managment.txt
+++ b/docs/managment.txt
@@ -17,7 +17,9 @@ DOs and DONTs
openshift_enable_service_catalog: false
Then, it is left in 'Error' state, but can be easily recovered by deleting it and
allowing the system to re-create a new pod.
- * However, as cause is unclear, it is possible that something else with break as time
+ * On the other hand, ksc completely breaks down if kept unchanged while upgrading from
+ 3.7.1 to 3.7.2. Updating ksc fixes the problem, except for the error mentioned above.
+ * As the cause is unclear, it is possible that something else will break as time
passes and new images are released. It is ADVISED to check upgrade in staging first.
* During the upgrade, other system pods may also get stuck in the Error state (as explained
in troubleshooting) and block the flow of the upgrade. Just delete them and allow
@@ -34,6 +36,10 @@ DOs and DONTs
openshift_storage_glusterfs_heketi_is_missing: False
But I am not sure if that is the only major issue.
+ - Master/node configuration updates cause no problems. They are executed with:
+ * playbooks/openshift-node/config.yml
+ * playbooks/openshift-master/config.yml
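+ (presumably invoked via ansible-playbook with your inventory, e.g.:
+ ansible-playbook -i <inventory> anslib/openshift-ansible/playbooks/openshift-node/config.yml)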
+
- A few administrative tools could cause trouble. Don't run
* oc adm diagnostics
diff --git a/docs/network.txt b/docs/network.txt
index bcd45e1..52c0058 100644
--- a/docs/network.txt
+++ b/docs/network.txt
@@ -70,10 +70,11 @@ Performance
4 kEUR for SX6018). License is called: UPGR-6036-GW.
- Measured performance
- Standard: ~ 3.2 Gb/s
- Standard (pods on the same node) ~ 20 - 30 Gb/s
- hostNet (using cluster IP ) ~ 3.6 Gb/s
- hostNet (using host IP) ~ 12 - 15 Gb/s
+ Standard: ~ 3.2 Gb/s 28 us
+ Standard (pods on the same node) ~ 20 - 30 Gb/s 12 us
+ hostNet (using cluster IP ) ~ 3.6 Gb/s 23 us
+ hostNet (using host IP) ~ 12 - 15 Gb/s 15 us
+ Standard to hostNet ~ 10 - 12 Gb/s 18 us
- So, I guess the optimal solution is really to introduce a second router for the cluster, but with Ethernet interface. Then, we can
reconfigure the second Infiniband adapter for the Ethernet mode. The switch to native routing should be possible also with running
diff --git a/docs/performance.txt b/docs/performance.txt
new file mode 100644
index 0000000..b31c02a
--- /dev/null
+++ b/docs/performance.txt
@@ -0,0 +1,54 @@
+Divergence from the best practices
+==================================
+ Due to various constraints, I had to take some decisions contradicting the best practices. There were also some
+ hardware limitations resulting in a suboptimal configuration.
+
+ Storage
+ -------
+ - RedHat documentation strongly discourages running Gluster over a large RAID-60. The best performance is achieved
+ if the disks are organized as JBOD and each is assigned a brick. The problem is that heketi is not really ready for
+ production yet. I ran into numerous problems while testing. Managing '3 x 24' gluster bricks manually would be a nightmare.
+ Consequently, I opted for RAID-60 to simplify maintenance and ensure no data is lost due to mismanagement of gluster
+ volumes.
+
+ - In general, the architecture is more suitable for many small servers, not just a couple of fat storage servers. Then,
+ the disk load would be distributed between multiple nodes. Furthermore, we can't use all the storage with 3 nodes.
+ We need 3 nodes to ensure arbitration in case of failure (or network outages). Even if the 3rd node only stores the
+ checksums, we can't easily use it to store data. OK, technically we can create 3 sets of 3 bricks and put the arbiter
+ brick on different nodes. But this again will complicate maintenance. Unless proper ordering is maintained, the replication
+ may happen between bricks on the same node, etc. So, again, I decided to favour fault tolerance over performance. We still
+ can use the space when the cluster is scaled.
+
+ Network
+ -------
+ - To ensure high-speed communication between pods running on different nodes, RedHat recommends enabling Container Native
+ Routing. This is done by creating a bridge for docker containers on the hardware network device instead of the OpenVSwitch fabric.
+ Unfortunately, IPoIB does not provide Ethernet L2/L3 capabilities and it is impossible to use IB devices for bridging.
+ It may still be possible to solve this somehow, but further research is required. The easier solution is just to switch the
+ OpenShift fabric to Ethernet. Anyway, we had the idea to separate the storage and OpenShift networks.
+
+ Memory
+ ------
+ - There are multiple docker storage engines. We are currently using the LVM-based 'devicemapper'. To build a container, the data is
+ copied from all image layers. The new 'overlay2' provides a virtual file system (overlayfs) joining all layers and performing
+ COW if the data is modified. It saves space, but more importantly it also enables page cache sharing, reducing the memory
+ footprint if multiple containers share the same layers (and they do share the CentOS base image at minimum). Another advantage is
+ slightly faster startup of containers with large images (as we don't need to copy all the files). On the negative side, it is not
+ fully POSIX compliant. Some applications may have problems because of this. For the major applications, workarounds are provided by
+ RedHat. But again, I opted for the more standard 'devicemapper' to avoid hard-to-debug problems.
+
+
+What is required
+================
+ - We need to add at least one more node. It will double the available storage and I expect a significant improvement of storage
+ performance. It would be even better to have 5-6 nodes to split the load.
+ - We need to switch to Ethernet fabric for the OpenShift network. Currently, it is not critical and would only add about 20% to ADEI
+ performance. However, it may become an issue if we optimize ADEI database handling or get more network-intensive applications in
+ the cluster.
+ - We need to re-evaluate RDMA support in GlusterFS. Currently, it is unreliable, causing pods to hang indefinitely. If it is
+ fixed, we can re-enable RDMA support for our volumes. It hopefully may further improve storage performance. Similarly, Gluster
+ block storage is significantly faster for the single-pod use case, but has significant stability issues at the moment.
+ - We need to check if OverlayFS causes any problems for the applications we plan to run. Enabling overlayfs should be good for
+ our cron services and may reduce the memory footprint.
+
+
diff --git a/docs/vagrant.txt b/docs/vagrant.txt
new file mode 100644
index 0000000..2cf3b43
--- /dev/null
+++ b/docs/vagrant.txt
@@ -0,0 +1,4 @@
+The staging setup is optimized to run in vagrant containers to perform tests before applying major modifications to the
+production system. However, there are several peculiarities to take care of:
+ - Vagrant uses NAT networking on eth0 (mandatory) and generates the same IP on all nodes. This confuses OpenShift.
+   As a solution: customize the NAT IPs and remove the default route on eth0 (configure standard dhcp on the second public interface).
diff --git a/group_vars/OSEv3.yml b/group_vars/OSEv3.yml
index 46482f4..7bf2fb1 100644
--- a/group_vars/OSEv3.yml
+++ b/group_vars/OSEv3.yml
@@ -1,8 +1,8 @@
### Deployment Type
openshift_deployment_type: origin
openshift_master_cluster_method: "native"
-openshift_release: "v3.7.1"
-#openshift_image_tag: "v3.7.1"
+openshift_release: "v3.7.2"
+#openshift_image_tag: "v3.7.2"
#containerized: true
containerized: false
@@ -31,8 +31,10 @@ os_firewall_use_firewalld: true
#ansible_service_broker_registry_url: "registry.access.redhat.com"
ansible_service_broker_etcd_image_tag: v3.2
-#test
-#openshift_enable_service_catalog: false
+osm_controller_args: {'feature-gates': ['PersistentLocalVolumes=true']}
+osm_api_server_args: {'feature-gates': ['PersistentLocalVolumes=true']}
+openshift_node_kubelet_args: {'feature-gates': ['PersistentLocalVolumes=true']}
+#openshift_node_kubelet_args: {'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
diff --git a/group_vars/baremetal.yml b/group_vars/baremetal.yml
index 9584cf0..be03d80 100644
--- a/group_vars/baremetal.yml
+++ b/group_vars/baremetal.yml
@@ -2,6 +2,7 @@
#glusterfs_transport: rdma
glusterfs_transport: tcp,rdma
ands_rdma_support: true
+ands_hostnet_db: false
# Size in GB to detect main Ands device
ands_data_device_threshold: 8192
diff --git a/playbooks/openshift-setup-project.yml b/playbooks/openshift-setup-project.yml
index f7d80e9..b36301b 100644
--- a/playbooks/openshift-setup-project.yml
+++ b/playbooks/openshift-setup-project.yml
@@ -10,6 +10,7 @@
vars:
kaas_projects: "{{ ands_openshift_projects.keys() }}"
kaas_openshift_volumes: "{{ ands_openshift_volumes }}"
+ kaas_storage_types: [ 'host' ]
kaas_single_project: "{{ ands_configure_project }}"
diff --git a/playbooks/openshift-setup-projects.yml b/playbooks/openshift-setup-projects.yml
index 00aabfd..16b9e66 100644
--- a/playbooks/openshift-setup-projects.yml
+++ b/playbooks/openshift-setup-projects.yml
@@ -17,6 +17,7 @@
vars:
kaas_projects: "{{ ands_openshift_projects.keys() }}"
kaas_openshift_volumes: "{{ ands_openshift_volumes }}"
+ kaas_storage_types: [ 'host' ]
- name: Configure users & user projects
diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml
index ce5dd23..54c800a 100644
--- a/roles/ands_facts/tasks/main.yml
+++ b/roles/ands_facts/tasks/main.yml
@@ -13,7 +13,15 @@
- name: "Configuring storage facts"
include_tasks: "storage.yml"
+- name: "Configuring ands nodes"
+ include_tasks: "nodes.yml"
+
+- name: "Configuring ands volumes"
+ include_tasks: "volumes.yml"
+
- name: "Confirm that ands facts are configured"
set_fact:
ands_none: "{{ ands_none }}"
ands_facts_configured: true
+
+#- debug: msg="{{ ands_volume_hostraid_servers }}"
diff --git a/roles/ands_facts/tasks/node.yml b/roles/ands_facts/tasks/node.yml
new file mode 100644
index 0000000..e30442c
--- /dev/null
+++ b/roles/ands_facts/tasks/node.yml
@@ -0,0 +1,5 @@
+- name: "Associating public host names with ids"
+ set_fact: "ands_host_{{ host_id }}_public_hostname={{ host.value['ands_openshift_public_hostname'] }}"
+
+- name: "Associating openshift fqdn with ids"
+ set_fact: "ands_host_{{ host_id }}_openshift_fqdn={{ host.value['ands_openshift_fqdn'] }}"
diff --git a/roles/ands_facts/tasks/nodes.yml b/roles/ands_facts/tasks/nodes.yml
new file mode 100644
index 0000000..ebe8091
--- /dev/null
+++ b/roles/ands_facts/tasks/nodes.yml
@@ -0,0 +1,10 @@
+- name: Process all nodes
+ include_tasks: "node.yml"
+ run_once: true
+ delegate_to: "{{ groups['masters'][0] }}"
+ with_dict: "{{ hostvars }}"
+ vars:
+ host_id: "{{ host.value['ands_host_id'] }}"
+ host_name: "{{ host.value['ansible_hostname'] }}"
+ loop_control:
+ loop_var: host
diff --git a/roles/ands_facts/tasks/volume.yml b/roles/ands_facts/tasks/volume.yml
new file mode 100644
index 0000000..5ac4af3
--- /dev/null
+++ b/roles/ands_facts/tasks/volume.yml
@@ -0,0 +1,8 @@
+- name: "Associating volumes with domains"
+ set_fact: "ands_volume_{{ name }}_domain={{ domain }}"
+
+- name: "Associating volumes with servers"
+ set_fact: "ands_volume_{{ name }}_servers={{ domain_servers }}"
+
+- name: "Associating volumes with servers"
+ set_fact: "ands_volume_{{ name }}_server_ids={{ domain_server_ids }}"
diff --git a/roles/ands_facts/tasks/volumes.yml b/roles/ands_facts/tasks/volumes.yml
new file mode 100644
index 0000000..616202c
--- /dev/null
+++ b/roles/ands_facts/tasks/volumes.yml
@@ -0,0 +1,7 @@
+- name: Process all storage domains
+ include_tasks: "volumes_iterate.yml"
+ run_once: true
+ delegate_to: "{{ groups[domain.servers][0] }}"
+ with_items: "{{ ands_local_storage_domains | default([]) | union(ands_storage_domains) }}"
+ loop_control:
+ loop_var: domain
diff --git a/roles/ands_facts/tasks/volumes_iterate.yml b/roles/ands_facts/tasks/volumes_iterate.yml
new file mode 100644
index 0000000..979e12f
--- /dev/null
+++ b/roles/ands_facts/tasks/volumes_iterate.yml
@@ -0,0 +1,13 @@
+---
+- name: Iterate volumes
+ include_tasks: "volume.yml"
+ with_dict: "{{ domain.volumes }}"
+ vars:
+ name: "{{ volume.key }}"
+ path: "{{ volume.value.mount }}"
+ server_group: "{{ domain.servers }}"
+ domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
+ domain_server_ids: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_host_id') | list }}"
+ when: volume.value.mount is defined
+ loop_control:
+ loop_var: volume
diff --git a/roles/ands_kaas/00-local-volumes.yml.j2 b/roles/ands_kaas/00-local-volumes.yml.j2
new file mode 100644
index 0000000..8d1a1c8
--- /dev/null
+++ b/roles/ands_kaas/00-local-volumes.yml.j2
@@ -0,0 +1,67 @@
+---
+apiVersion: v1
+kind: Template
+metadata:
+ name: {{ kaas_project }}-local-volumes
+ annotations:
+ descriptions: "{{ kaas_project }} local volumes"
+objects:
+{% for name, vol in kaas_project_local_volumes.iteritems() %}
+{% set voltypes = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".type") %}
+{% set voltype = voltypes[0] | default('host') %}
+{% set mntpaths = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".mount") %}
+{% set mntpath = mntpaths[0] | default('') %}
+{% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
+{% set cfgpath = vol.path | default("") %}
+{% set path = cfgpath if cfgpath[:1] == "/" else "/" + kaas_project + "/" + cfgpath %}
+{% if oc_name | regex_search("^" + kaas_project) %}
+{% set pvprefix = oc_name %}
+{% else %}
+{% set pvprefix = (kaas_project + "-" + oc_name) | regex_replace('_','-') %}
+{% endif %}
+{% for id in vol.nodes | default(hostvars[inventory_hostname]['ands_volume_' + vol.volume + '_server_ids']) %}
+{% set srvid = (id | string) %}
+{% set server_name = hostvars[inventory_hostname]['ands_host_' + srvid + '_public_hostname'] %}
+{% set openshift_name = hostvars[inventory_hostname]['ands_host_' + srvid + '_openshift_fqdn'] %}
+{% set pvname = pvprefix + '-' + server_name %}
{# loop.index0 is used because a manually incremented counter would not survive Jinja's per-iteration scoping #}
{% set pvcname = oc_name + '-' + (loop.index0|string) %}
+ - apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: {{ pvname }}
+ annotations:
+ "volume.alpha.kubernetes.io/node-affinity": '{
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ { "matchExpressions": [ { "key": "kubernetes.io/hostname", "operator": "In", "values": ["{{ openshift_name }}"] } ]}
+ ]
+ }
+ }'
+ spec:
+ storageClassName: kaas-local-storage
+ persistentVolumeReclaimPolicy: Retain
+ local:
+ path: "{{ mntpath }}{{ path }}"
+ readOnly: {{ not (vol.write | default(false)) }}
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: {{ vol.capacity | default(kaas_default_volume_capacity) }}
+ claimRef:
+ name: {{ pvcname }}
+ namespace: {{ kaas_project }}
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: {{ pvcname }}
+ spec:
+ volumeName: {{ pvname }}
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ vol.capacity | default(kaas_default_volume_capacity) }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/ands_kaas/tasks/do_apps.yml b/roles/ands_kaas/tasks/do_apps.yml
index 6738b7f..39283b4 100644
--- a/roles/ands_kaas/tasks/do_apps.yml
+++ b/roles/ands_kaas/tasks/do_apps.yml
@@ -1,16 +1,20 @@
- name: "Process KaaS apps"
include_tasks: "template.yml"
run_once: true
- with_items: "{{ kaas_project_apps }}"
+ with_dict: "{{ kaas_project_apps }}"
loop_control:
- loop_var: appname
+ loop_var: appitem
when:
- - app.provision | default(true)
- - (ands_configure_app == ands_none) or (app.name == ands_configure_app)
+ - appitem.value.provision | default(true)
+ - (ands_configure_app == ands_none) or (appname == ands_configure_app)
+ - appname | match(kaas_app_regexp | default('.*'))
vars:
- app: "{{ kaas_project_config[appname] }}"
- name: "{{ app.name | default((app.pods.keys() | list)[0]) }}"
- instantiate: "{{ app.instantiate | default(false) }}"
+ app: "{{ kaas_project_config[appitem.key] }}"
+ appname: "{{ app.name | default(appitem.key) | regex_replace('_','-') }}"
+# appname: "{{ app.name | default((app.pods.keys() | list)[0]) }}"
+ options: "{{ app.options | default({}) }}"
+ delete: "{{ options.delete | default(true) }}"
+ instantiate: "{{ appitem.value.instantiate | default(false) }}"
load: "{{ app.load | default(false) }}"
pods: "{{ app.pods }}"
tmpl_name: "50-kaas-pods.yml.j2"
diff --git a/roles/ands_kaas/tasks/do_keys.yml b/roles/ands_kaas/tasks/do_keys.yml
new file mode 100644
index 0000000..391392e
--- /dev/null
+++ b/roles/ands_kaas/tasks/do_keys.yml
@@ -0,0 +1,6 @@
+- name: Load OpenSSL keys
+ include_tasks: keys.yml
+ run_once: true
+ with_dict: "{{ kaas_project_pods }}"
+ loop_control:
+ loop_var: pod
diff --git a/roles/ands_kaas/tasks/do_project.yml b/roles/ands_kaas/tasks/do_project.yml
index f5b3276..71a54ad 100644
--- a/roles/ands_kaas/tasks/do_project.yml
+++ b/roles/ands_kaas/tasks/do_project.yml
@@ -2,77 +2,24 @@
- name: Ensure OpenShift template directory exists
file: path="{{ kaas_template_path }}" state="directory" mode=0755 owner=root group=root
-- name: Configure KaaS volumes
- include_tasks: volume.yml
- run_once: true
-# delegate_to: "{{ groups.masters[0] }}"
- with_dict: "{{ kaas_project_volumes }}"
- loop_control:
- loop_var: osv
- vars:
- vt_query: "[*].volumes.{{osv.value.volume}}.type"
- voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
- mp_query: "[*].volumes.{{osv.value.volume}}.mount"
- mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
- rp_query: "[*].volumes.{{osv.value.volume}}.path"
- realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
- osvpath: "{{ osv.value.path | default('') }}"
- prefix: "{{ ( osvpath[:1] == '/' ) | ternary('', '/' ~ kaas_project ~ '/') }}"
- path: "{{ mntpath[0] ~ prefix ~ osvpath }}"
- hostpath: "{{ realpath[0] is defined | ternary((realpath[0] | default('')) ~ prefix ~ osvpath, '') }}"
- name: "{{osv.key}}"
- volume: "{{osv.value}}"
- when: ( mntpath | length ) > 0
-
-- name: Check if static configuration exists
- local_action: stat path="{{ kaas_project_path }}/files/"
- register: result
-
-- name: Search static configuration
- include_tasks: search.yml
- when: result.stat.exists
-
-- name: Configure KaaS files
- include_tasks: file.yml
- run_once: true
-# delegate_to: "{{ groups.masters[0] }}"
- with_items: "{{ kaas_project_config.files | default(kaas_openshift_files) | default([]) }}"
- loop_control:
- loop_var: file
- vars:
- osv: "{{ kaas_project_volumes[file.osv] }}"
- vt_query: "[*].volumes.{{osv.volume}}.type"
- voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
- mp_query: "[*].volumes.{{osv.volume}}.mount"
- mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
- rp_query: "[*].volumes.{{osv.volume}}.path"
- realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
- pvar: "kaas_{{ file.osv }}_path"
- path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}"
- hvar: "kaas_{{ file.osv }}_hostpath"
- hostpath: "{{ hostvars[inventory_hostname][hvar] }}/{{ file.path }}"
- when: file.osv in kaas_project_volumes
-
-- name: Load OpenSSL keys
- include_tasks: keys.yml
-# delegate_to: "{{ groups.masters[0] }}"
- run_once: true
- with_dict: "{{ kaas_project_pods }}"
- loop_control:
- loop_var: pod
-
- name: "Run OC script"
include_tasks: ocscript.yml
-# delegate_to: "{{ groups.masters[0] }}"
run_once: true
when: kaas_project_config.oc is defined
-- name: "Configure all templates"
- include_tasks: templates.yml
-# delegate_to: "{{ groups.masters[0] }}"
+- block:
+ - name: Configure storage
+ include_tasks: do_storage.yml
+
+ - name: Configure SSL keys
+ include_tasks: do_keys.yml
+
+ - name: Configure all templates
+ include_tasks: templates.yml
+
+ - name: Install Applications
+ include_tasks: do_apps.yml
run_once: true
when:
- kaas_project_config.oc is undefined
-- name: Install Applications
- include_tasks: do_apps.yml
diff --git a/roles/ands_kaas/tasks/do_storage.yml b/roles/ands_kaas/tasks/do_storage.yml
index ee118fd..e79db56 100644
--- a/roles/ands_kaas/tasks/do_storage.yml
+++ b/roles/ands_kaas/tasks/do_storage.yml
@@ -18,8 +18,18 @@
volume: "{{osv.value}}"
when:
- ( mntpath | length ) > 0
- - (osv.type | default("host")) in [ "host" ]
+ - (kaas_storage_types is not defined) or ((osv.type | default("host")) in kaas_storage_types)
+- name: Check if static configuration exists
+ local_action: stat path="{{ kaas_project_path }}/files/"
+ register: result
+
+# Executed only if the complete project is provisioned (not if we just provision per-node storage)
+- name: Search static configuration
+ include_tasks: search.yml
+ when:
+ - result.stat.exists
+ - kaas_storage_types is not defined
- name: Configure KaaS files
include_tasks: file.yml
@@ -40,4 +50,4 @@
hostpath: "{{ hostvars[inventory_hostname][hvar] }}/{{ file.path }}"
when:
- file.osv in kaas_project_volumes
- - (osv.type | default("host")) in [ "host" ]
+ - (kaas_storage_types is not defined) or ((osv.type | default("host")) in kaas_storage_types)
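The new kaas_storage_types guard lets a caller restrict provisioning to selected volume types, e.g. only host-local storage when preparing a single node (a hypothetical invocation):

  - include_tasks: do_storage.yml
    vars:
      kaas_storage_types: [ "host" ]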
diff --git a/roles/ands_kaas/tasks/ocitem.yml b/roles/ands_kaas/tasks/ocitem.yml
index addb249..758cdaf 100644
--- a/roles/ands_kaas/tasks/ocitem.yml
+++ b/roles/ands_kaas/tasks/ocitem.yml
@@ -1,13 +1,35 @@
---
+- name: Storage
+ include_tasks: do_storage.yml
+ run_once: true
+ vars:
+ kaas_storage_regexp: "{{ ocitem.storage }}"
+ when: ocitem.storage is defined
+
+- name: Keys
+ include_tasks: do_keys.yml
+ run_once: true
+ vars:
+ kaas_keys_regexp: "{{ ocitem.keys }}"
+ when: ocitem.keys is defined
+
- name: OpenShift templates
include_tasks: templates.yml
run_once: true
vars:
- kaas_template_glob: "{{ ocitem.template }}"
- when: ocitem.template is defined
+ kaas_template_glob: "{{ ocitem.templates }}"
+ when: ocitem.templates is defined
+
+- name: OpenShift apps
+ include_tasks: do_apps.yml
+ run_once: true
+ vars:
+ kaas_app_regexp: "{{ ocitem.apps }}"
+ when: ocitem.apps is defined
- name: OpenShift commands
include_tasks: oc.yml
-# delegate_to: "{{ groups.masters[0] }}"
run_once: true
when: ocitem.oc is defined
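Each entry of a project 'oc' script now drives exactly one of these steps, filtered by the supplied pattern; the script added in setup/projects/adei/vars/script.yml, for instance, provisions storage and keys first and only then templates and apps:

  oc:
    - storage: ".*"
    - keys: ".*"
    - templates: "*"
    - apps: ".*"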
diff --git a/roles/ands_kaas/tasks/project.yml b/roles/ands_kaas/tasks/project.yml
index ecb2035..26bd0cc 100644
--- a/roles/ands_kaas/tasks/project.yml
+++ b/roles/ands_kaas/tasks/project.yml
@@ -26,7 +26,7 @@
with_dict: "{{ kaas_block_volumes }}"
when: item.value.project == kaas_project
-- name: Get information about block volumes
+- name: Get more information about block volumes
delegate_to: "{{ groups.masters[0] }}"
shell: gluster-block info {{ item.value.volume }}/{{ item.key }} | grep -oP '^EXPORTED NODE.*:\s*\K.*' | tr ' ' '\n'
register: portal_info
@@ -55,11 +55,12 @@
- include_tasks: "do_{{ do_subrole | default('project') }}.yml"
vars:
var_name: "var_{{kaas_project}}_config"
- kaas_project_config: "{{ hostvars[inventory_hostname][var_name] }}"
- kaas_project_volumes: "{{ kaas_project_config.volumes | default(kaas_project_config.extra_volumes | default({}) | combine(kaas_openshift_volumes)) }}"
- kaas_project_pods: "{{ kaas_project_config.pods | default({}) }}"
- kaas_project_apps: "{{ kaas_project_config.apps | default([]) }}"
- kaas_project_gids: "{{ kaas_project_config.gids | default(kaas_openshift_gids) }}"
- kaas_project_uids: "{{ kaas_project_config.uids | default(kaas_openshift_uids) }}"
- kaas_blockvol_info: "{{ block_info }}"
+ kaas_project_config: "{{ hostvars[inventory_hostname][var_name] }}"
+ kaas_project_volumes: "{{ kaas_project_config.volumes | default(kaas_project_config.extra_volumes | default({}) | combine(kaas_openshift_volumes)) }}"
+ kaas_project_local_volumes: "{{ kaas_project_config.local_volumes | default({}) }}"
+ kaas_project_pods: "{{ kaas_project_config.pods | default({}) }}"
+  kaas_project_apps: "{{ kaas_project_config.apps | default({}) }}"
+ kaas_project_gids: "{{ kaas_project_config.gids | default(kaas_openshift_gids) }}"
+ kaas_project_uids: "{{ kaas_project_config.uids | default(kaas_openshift_uids) }}"
+ kaas_blockvol_info: "{{ block_info }}"
\ No newline at end of file
diff --git a/roles/ands_kaas/tasks/template.yml b/roles/ands_kaas/tasks/template.yml
index 418331a..87e45a6 100644
--- a/roles/ands_kaas/tasks/template.yml
+++ b/roles/ands_kaas/tasks/template.yml
@@ -1,9 +1,9 @@
-- name: "Populate template {{ tmpl_name }}"
+- name: "Populate template '{{ tmpl_name }}' in project '{{ kaas_project }}' for application '{{ appname | default('kaas') }}'"
template: src="{{ item }}" dest="{{ kaas_template_path }}/{{ dest_name }}" owner=root group=root mode="0644"
register: result
vars:
default_name: "{{ item | basename | regex_replace('\\.j2','') }}"
- dest_name: "{{ (name is defined) | ternary ( (name | default('')) + '.yml', default_name ) }}"
+ dest_name: "{{ (appname is defined) | ternary ( '90-' + (appname | default('')) + '.yml', default_name ) }}"
with_first_found:
- paths:
- "{{ role_path }}/templates/"
@@ -15,10 +15,13 @@
include_role: name="openshift_resource"
when: instantiate == true
vars:
- template: "{{ tmpl_name | basename | regex_replace('\\.j2','') }}"
+ default_name: "{{ tmpl_name | basename | regex_replace('\\.j2','') }}"
+ dest_name: "{{ (appname is defined) | ternary ( '90-' + (appname | default('')) + '.yml', default_name ) }}"
+ template: "{{ dest_name }}"
template_path: "{{ kaas_template_path }}"
project: "{{ kaas_project }}"
- recreate: "{{ result | changed | ternary (true, false) }}"
+ recreate: "{{ result | changed | ternary (delete | ternary(true, false), false) }}"
+ replace: "{{ result | changed | ternary (delete | ternary(false, true), false) }}"
# alternatively load template
# TODO
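The 'delete' option therefore decides how a changed template is rolled out; the flag combinations of the two ternaries above reduce to:

  # result changed, delete=true  -> recreate: true,  replace: false  (delete and re-create resources)
  # result changed, delete=false -> recreate: false, replace: true   (oc replace in place)
  # result unchanged             -> recreate: false, replace: false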
diff --git a/roles/ands_kaas/templates/00-local-volumes.yml.j2 b/roles/ands_kaas/templates/00-local-volumes.yml.j2
new file mode 100644
index 0000000..a97ffae
--- /dev/null
+++ b/roles/ands_kaas/templates/00-local-volumes.yml.j2
@@ -0,0 +1,50 @@
+---
+apiVersion: v1
+kind: Template
+metadata:
+ name: {{ kaas_project }}-local-volumes
+ annotations:
+ descriptions: "{{ kaas_project }} local volumes"
+objects:
+{% for name, vol in kaas_project_local_volumes.iteritems() %}
+{% set voltypes = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".type") %}
+{% set voltype = voltypes[0] | default('host') %}
+{% set mntpaths = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".mount") %}
+{% set mntpath = mntpaths[0] | default('') %}
+{% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
+{% set cfgpath = vol.path | default("") %}
+{% set path = cfgpath if cfgpath[:1] == "/" else "/" + kaas_project + "/" + cfgpath %}
+{% if oc_name | regex_search("^" + kaas_project) %}
+{% set pvprefix = oc_name %}
+{% else %}
+{% set pvprefix = (kaas_project + "-" + oc_name) | regex_replace('_','-') %}
+{% endif %}
+{% for id in vol.nodes | default(hostvars[inventory_hostname]['ands_volume_' + vol.volume + '_server_ids']) %}
+{% set srvid = (id | string) %}
+{% set server_name = hostvars[inventory_hostname]['ands_host_' + srvid + '_public_hostname'] %}
+{% set openshift_name = hostvars[inventory_hostname]['ands_host_' + srvid + '_openshift_fqdn'] %}
+{% set pvname = pvprefix + '-' + server_name %}
+ - apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: {{ pvname }}
+ annotations:
+ "volume.alpha.kubernetes.io/node-affinity": '{
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ { "matchExpressions": [ { "key": "kubernetes.io/hostname", "operator": "In", "values": ["{{ openshift_name }}"] } ]}
+ ]
+ }
+ }'
+ spec:
+ storageClassName: {{ vol.sc | default('kaas-lst-' + pvprefix) }}
+ persistentVolumeReclaimPolicy: Retain
+ local:
+ path: "{{ mntpath }}{{ path }}"
+ readOnly: {{ not (vol.write | default(false)) }}
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: {{ vol.capacity | default(kaas_default_volume_capacity) }}
+{% endfor %}
+{% endfor %}
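Unlike the variant above, this template creates no claims: each PV instead advertises a per-volume storage class ('kaas-lst-<pv name>' unless vol.sc overrides it), and the StatefulSet claim templates in 50-kaas-pods.yml.j2 request the same class, e.g. (hypothetical names and size):

  volumeClaimTemplates:
    - metadata:
        name: adei-galera
      spec:
        storageClassName: kaas-lst-adei-galera
        accessModes: [ "ReadWriteOnce" ]
        resources: { requests: { storage: "10Gi" } }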
diff --git a/roles/ands_kaas/templates/50-kaas-pods.yml.j2 b/roles/ands_kaas/templates/50-kaas-pods.yml.j2
index 761004d..8c7fe85 100644
--- a/roles/ands_kaas/templates/50-kaas-pods.yml.j2
+++ b/roles/ands_kaas/templates/50-kaas-pods.yml.j2
@@ -1,14 +1,20 @@
#jinja2: trim_blocks: "true", lstrip_blocks: "false"
---
+{% set app = app | default({}) %}
apiVersion: v1
kind: Template
metadata:
- name: {{ name | default(kaas_project) }}-pods
+ name: {{ appname | default(kaas_project) }}-pods
annotations:
- descriptions: {{ kaas_project_config.description | default(name | default(kaas_project) ~ " auto-generated pod template") }}
+ descriptions: {{ kaas_project_config.description | default(appname | default(kaas_project) ~ " auto-generated pod template") }}
+{% set applabels = ( app.labels | default({}) | combine( { 'app': appname }) ) if appname is defined else (app.labels | default({})) %}
+{% if applabels | length > 0 %}
+ labels: {{ applabels | to_json }}
+{% endif %}
objects:
{% for name, pod in pods.iteritems() %}
{% set kind = pod.kind | default('DeploymentConfig') %}
+ {% set podname = pod.name | default(name) | regex_replace('_','-') %}
{% if pod.enabled | default(true) %}
{% set pubkey = "kaas_" ~ name ~ "_pubkey" %}
{% set privkey = "kaas_" ~ name ~ "_privkey" %}
@@ -17,19 +23,27 @@ objects:
{% set pod = pod[pod.variant] %}
{% endif %}
{% set sched = pod.sched | default({}) %}
+ {% set service = pod.service | default({}) %}
+  {% set headless = (service.headless | default(false)) if kind == 'StatefulSet' else false %}
+ {% set network = pod.network | default({}) %}
+ {% set hostnet = network.host | default(false) %}
{% set node_selector = (sched.selector is defined) | ternary(sched.selector, ands_default_node_selector | combine(sched.restrict | default({}))) %}
+ {% set labels = pod.general_labels | default({}) | combine(applabels) %}
{% if pod.service is defined %}
- {% if kind == 'StatefulSet' and pod.service.ports is defined %}
+ {% if headless and pod.service.ports is defined %}
- apiVersion: v1
kind: Service
metadata:
- name: {{ pod.name | default(name) }}-ss
+ name: {{ podname }}-ss
annotations: {{ pod.service.annotations | default({}) | combine({"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true" }) | to_json }}
+ {% if labels | length > 0 %}
+ labels: {{ labels | to_json }}
+ {% endif %}
spec:
clusterIP: None
publishNotReadyAddresses: True
selector:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
ports:
{% for port in pod.service.ports %}
{% set portmap = (port | string).split('/') %}
@@ -41,13 +55,16 @@ objects:
- apiVersion: v1
kind: Service
metadata:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
{% if pod.service.annotations is defined %}
annotations: {{ pod.service.annotations | to_json }}
{% endif %}
+ {% if labels | length > 0 %}
+ labels: {{ labels | to_json }}
+ {% endif %}
spec:
selector:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
{% if pod.service.ip is defined %}
clusterIP: {{ pod.service.ip }}
{% endif %}
@@ -65,12 +82,15 @@ objects:
- apiVersion: v1
kind: Route
metadata:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
+ {% if labels | length > 0 %}
+ labels: {{ labels | to_json }}
+ {% endif %}
spec:
host: {{ pod.service.host }}
to:
kind: Service
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
port:
targetPort: {{ (first_port[1] is defined) | ternary(first_port[1], first_port[0]) }}
{% if (first_port[0] == "80") %}
@@ -95,7 +115,10 @@ objects:
- apiVersion: {{ kaas_openshift_api_versions[kind] | default('v1') }}
kind: {{ kind }}
metadata:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
+ {% if labels | length > 0 %}
+ labels: {{ labels | to_json }}
+ {% endif %}
spec:
replicas: {{ ( sched | default({})).replicas | default(1) }}
revisionHistoryLimit: 2
@@ -111,25 +134,59 @@ objects:
triggers:
- type: ConfigChange
{% if kind == 'StatefulSet' %}
- serviceName: {{ pod.name | default(name) }}-ss
+ {% if headless %}
+ serviceName: {{ podname }}-ss
+ {% else %}
+ serviceName: {{ podname }}
+ {% endif %}
selector:
matchLabels:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
{% else %}
selector:
- name: {{ pod.name | default(name) }}
+ name: {{ podname }}
{% endif %}
+ {% if pod.pvc is defined %}
+ volumeClaimTemplates:
+ {% for name, pvc in pod.pvc.iteritems() %}
+ {% set pvcname = name | regex_replace('_','-') %}
+ {% set pv = kaas_project_local_volumes[pvcname] | default({}) %}
+ {% set oc_name = pv.name | default(pvcname) | regex_replace('_','-') %}
+ {% if oc_name | regex_search("^" + kaas_project) %}
+ {% set pvname = oc_name %}
+ {% else %}
+ {% set pvname = (kaas_project + "-" + oc_name) | regex_replace('_','-') %}
+ {% endif %}
+ - metadata:
+ name: {{ pvcname }}
+ spec:
+ storageClassName: {{ pvc.sc | default(pv.sc | default('kaas-lst-' + pvname)) }}
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ pvc.capacity | default(pv.capacity | default(kaas_default_volume_capacity)) }}
+ {% endfor %}
+ {% endif %}
template:
metadata:
- name: {{ pod.name | default(name) }}
- {% if kind == 'StatefulSet' %}
+ name: {{ podname }}
+ {% if headless %}
annotations: {{ pod.annotations | default({}) | combine({"pod.alpha.kubernetes.io/initialized": "true"}) | to_json }}
{% elif pod.annotations is defined %}
annotations: {{ pod.annotations | to_json }}
{% endif %}
- labels:
- name: {{ pod.name | default(name) }}
+ labels: {{ pod.labels | default({}) | combine(labels) | combine({'name': podname, 'app': (appname | default('kaas'))}) | to_json }}
spec:
+ {% if pod.sa is defined %}
+ serviceAccountName: {{ pod.sa }}
+ {% endif %}
+ hostNetwork: {{ hostnet }}
+ {% if (headless) and (hostnet) %}
+ dnsPolicy: {{ network.dns_policy | default('ClusterFirstWithHostNet') }}
+ {% elif network.dns_policy is defined %}
+ dnsPolicy: {{ network.dns_policy }}
+ {% endif %}
{% if node_selector | length > 0 %}
nodeSelector: {{ node_selector | to_json }}
{% endif %}
@@ -140,10 +197,12 @@ objects:
{% for img in pod.images %}
{% set imgidx = loop.index %}
{% for vol in (img.mappings | default([])) %}
- {% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
+ {% if (vol.name | default(name)) in kaas_project_volumes.keys() %}
+ {% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
- name: vol-{{imgidx}}-{{loop.index}}
persistentVolumeClaim:
claimName: {{ oc_name }}
+ {% endif %}
{% endfor %}
{% for vol in (img.hostpath | default([])) %}
- name: host-{{imgidx}}-{{loop.index}}
@@ -170,7 +229,7 @@ objects:
containers:
{% for img in pod.images %}
{% set imgidx = loop.index %}
- - name: {{ img.name | default(pod.name) | default(name) }}
+ - name: {{ img.name | default(podname) }}
image: {{ img.image }}
imagePullPolicy: {{ img.pull | default('Always') }}
{% if (img.command is defined) %}
@@ -179,13 +238,22 @@ objects:
{% if img.ports is defined %}
ports:
{% for port in img.ports %}
+ {% if hostnet %}
+ {% set portmap = (port | string).split('/') %}
+ - containerPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }}
+ hostPort: {{ portmap[0] }}
+ {% else %}
- containerPort: {{ port }}
+ {% endif %}
{% endfor %}
{% elif pod.service.ports is defined %}
ports:
{% for port in pod.service.ports %}
{% set portmap = (port | string).split('/') %}
- containerPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }}
+ {% if hostnet %}
+ hostPort: {{ portmap[0] }}
+ {% endif %}
{% endfor %}
{% endif %}
{% if kind == 'StatefulSet' %}
@@ -226,7 +294,12 @@ objects:
{% if img.mappings is defined or img.hostpath is defined %}
volumeMounts:
{% for vol in (img.mappings | default([])) %}
+ {% if vol.name in kaas_project_volumes.keys() %}
- name: vol-{{imgidx}}-{{loop.index}}
+ {% elif vol.name in kaas_project_local_volumes.keys() %}
+ {% set pvcname = vol.name | regex_replace('_','-') %}
+ - name: {{ pvcname }}
+ {% endif %}
subPath: {{ vol.path | default("") }}
mountPath: {{ vol.mount }}
{% endfor %}
diff --git a/roles/ands_network/defaults/main.yml b/roles/ands_network/defaults/main.yml
index 139e8b3..0170370 100644
--- a/roles/ands_network/defaults/main.yml
+++ b/roles/ands_network/defaults/main.yml
@@ -1 +1,3 @@
configure_network: "{{ ands_configure_network | default(false) }}"
+firewall_template_path: "{{ ands_paths.provision }}/firewall/{{ ansible_hostname }}"
+firewall_services: [ 'galera', 'netpipe' ]
\ No newline at end of file
diff --git a/roles/ands_network/files/galera.xml b/roles/ands_network/files/galera.xml
new file mode 100644
index 0000000..15f908b
--- /dev/null
+++ b/roles/ands_network/files/galera.xml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="utf-8"?>
+<service>
+ <short>MySQL/Galera</short>
+ <description>MySQL/Galera Database Server</description>
+ <port protocol="tcp" port="3306"/>
+ <port protocol="tcp" port="4567"/>
+ <port protocol="udp" port="4567"/>
+ <port protocol="tcp" port="4568"/>
+ <port protocol="tcp" port="4444"/>
+</service>
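These are the standard Galera ports: 3306 for MySQL clients, 4567 (tcp/udp) for group communication, 4568 for incremental state transfer (IST), and 4444 for full state snapshot transfer (SST).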
diff --git a/roles/ands_network/files/netpipe.xml b/roles/ands_network/files/netpipe.xml
new file mode 100644
index 0000000..0e7f355
--- /dev/null
+++ b/roles/ands_network/files/netpipe.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<service>
+ <short>NetPIPE</short>
+ <description>NetPIPE network benchmark</description>
+ <port protocol="tcp" port="5002"/>
+</service>
diff --git a/roles/ands_network/tasks/add_names.yml b/roles/ands_network/tasks/add_names.yml
new file mode 100644
index 0000000..3edde38
--- /dev/null
+++ b/roles/ands_network/tasks/add_names.yml
@@ -0,0 +1,28 @@
+# Currently EXCLUDED
+# Kind of post-install. We can include this in 'maintain' later.
+
+# We should not do it before Gluster peers are probed, otherwise everything will fail.
+# Some peers will have names and others IPs.
+- name: Configure all storage hostnames in /etc/hosts
+ lineinfile: dest="/etc/hosts" line="{{ ip }} {{ fqdn }} {{ hostname }}" regexp="{{ fqdn }}" state="present"
+ when:
+ - hostvars[item]['ands_facts_configured'] is defined
+ vars:
+ ip: "{{ hostvars[item]['ands_storage_ip'] }}"
+ hostname: "{{ hostvars[item]['ands_hostname_storage'] }}"
+ fqdn: "{{ hostvars[item]['ands_hostname_storage'] ~ ands_inner_dot_domain }}"
+ with_inventory_hostnames:
+ - nodes
+ - new_nodes
+
+- name: Configure all public hostnames in /etc/hosts
+ lineinfile: dest="/etc/hosts" line="{{ ip }} {{ fqdn }} {{ hostname }}" regexp="{{ fqdn }}" state="present"
+ when:
+ - hostvars[item]['ands_facts_configured'] is defined
+ vars:
+ ip: "{{ hostvars[item]['ands_openshift_public_ip'] }}"
+ hostname: "{{ hostvars[item]['ands_hostname_public'] }}"
+ fqdn: "{{ hostvars[item]['ands_hostname_public'] ~ ands_inner_dot_domain }}"
+ with_inventory_hostnames:
+ - nodes
+ - new_nodes
diff --git a/roles/ands_network/tasks/common.yml b/roles/ands_network/tasks/common.yml
index f2fda00..940cde7 100644
--- a/roles/ands_network/tasks/common.yml
+++ b/roles/ands_network/tasks/common.yml
@@ -7,7 +7,6 @@
# - nodes
# - new_nodes
-
# This will not work properly unless 'ands_facts' are executed on all nodes.... This is checked by evaluating if 'ands_openshift_fqdn' is defined
- name: Configure all cluster hostnames in /etc/hosts
lineinfile: dest="/etc/hosts" line="{{ ip }} {{ fqdn }} {{ hostname }}" regexp="{{ fqdn }}" state="present"
diff --git a/roles/ands_network/tasks/firewall.yml b/roles/ands_network/tasks/firewall.yml
new file mode 100644
index 0000000..d5ba5f3
--- /dev/null
+++ b/roles/ands_network/tasks/firewall.yml
@@ -0,0 +1,32 @@
+- name: Ensure firewall template directory exists
+  file: path="{{ firewall_template_path }}" state="directory" mode=0755 owner=root group=root
+
+# Just in case services were already added but not reloaded yet
+#- name: Reload firewalld rules
+# shell: firewall-cmd --reload
+
+- name: Get list of existing firewalld services
+ shell: "firewall-cmd --get-services | tr ' ' '\n'"
+ changed_when: false
+ register: services
+
+- name: Configure missing firewalld services
+ include_tasks: firewall_service.yml
+ with_items: "{{ firewall_services }}"
+ vars:
+ servicelist: "{{ services.stdout_lines }}"
+ loop_control:
+ loop_var: service
+
+- name: Reload firewalld rules
+ shell: firewall-cmd --reload
+
+- name: Enable MySQL and Galera services if ands_hostnet_db is enabled
+ firewalld: service="{{ item }}" state="enabled" permanent="true" immediate="true"
+ when: ands_hostnet_db | default(false)
+ with_items:
+ - mysql
+ - galera
+
+- name: Reload firewalld rules
+ shell: firewall-cmd --reload
diff --git a/roles/ands_network/tasks/firewall_service.yml b/roles/ands_network/tasks/firewall_service.yml
new file mode 100644
index 0000000..98bc866
--- /dev/null
+++ b/roles/ands_network/tasks/firewall_service.yml
@@ -0,0 +1,13 @@
+- name: "Copy firewalld service '{{ service }}'"
+ copy: src="{{ service }}.xml" dest="{{ firewall_template_path }}/{{ service }}.xml" owner=root group=root mode="0644"
+ register: result
+
+- name: "Delete old version of firewalld service '{{ service }}'"
+ command: "firewall-offline-cmd --remove-service={{ service }}"
+ when:
+ - service in servicelist
+ - result | changed
+
+- name: "Create firewalld service '{{ service }}'"
+ command: "firewall-offline-cmd --new-service-from-file='{{ firewall_template_path }}/{{ service }}.xml' --name={{ service }}"
+ when: (service not in servicelist) or (result | changed)
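Since firewall-offline-cmd only edits the permanent configuration, the calling play reloads firewalld afterwards (see firewall.yml above); the result can be checked manually, e.g.:

  firewall-cmd --reload
  firewall-cmd --info-service=galera
  firewall-cmd --list-services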
diff --git a/roles/ands_network/tasks/nm_configure.yml b/roles/ands_network/tasks/nm_configure.yml
index 4482705..57e40ca 100644
--- a/roles/ands_network/tasks/nm_configure.yml
+++ b/roles/ands_network/tasks/nm_configure.yml
@@ -1,4 +1,3 @@
-
- name: install needed network manager libs
yum: name='{{ item }}' state=installed
with_items:
@@ -21,6 +20,16 @@
cidr: "{{ ands_storage_cidr }}"
force: true
+- name: configure bridged openshift network
+ include_tasks: nm_configure_connection.yml
+ vars:
+ bridge: "{{ ands_bridge }}"
+ name: "openshift"
+ iface: "{{ ands_inner_interface }}"
+ cidr: "{{ ands_openshift_cidr }}"
+ force: true
+ when: ands_enable_cnr | default(false)
+
- name: configure openshift network
include_tasks: nm_configure_connection.yml
vars:
@@ -28,6 +37,8 @@
iface: "{{ ands_inner_interface }}"
cidr: "{{ ands_openshift_cidr }}"
force: true
+ when: not (ands_enable_cnr | default(false))
+
- name: configure public network
include_tasks: nm_configure_connection.yml
@@ -37,3 +48,5 @@
cidr: "{{ ands_openshift_public_cidr }}"
alias: true
+- name: Configure firewall
+ include_tasks: firewall.yml
diff --git a/roles/ands_network/tasks/nm_configure_connection.yml b/roles/ands_network/tasks/nm_configure_connection.yml
index 18fc91e..9354fbf 100644
--- a/roles/ands_network/tasks/nm_configure_connection.yml
+++ b/roles/ands_network/tasks/nm_configure_connection.yml
@@ -1,15 +1,16 @@
-- name: "detect nm connection corresponding to interface '{{ iface }}'"
+- name: "detect nm connection corresponding to interface '{{ bridge | default(iface) }}'"
shell: "nmcli d show {{ iface | quote }} | grep CONNECTION | cut -d ':' -f 2- | sed -E -e 's/^[[:space:]]+//' | grep '^[[:alpha:]]'"
register: conres
failed_when: false
changed_when: false
-- name: "check if the requested ip '{{ cidr }}' is present on the interface '{{ iface }}'"
+- name: "check if the requested ip '{{ cidr }}' is present on the interface '{{ biface }}'"
set_fact:
ip_present: "{{ cidr | ipaddr('address') in ips }}"
vars:
- eth: "{{ hostvars[inventory_hostname]['ansible_' + iface] | default({}) }}"
+ biface: "{{ bridge | default(iface) }}"
+ eth: "{{ hostvars[inventory_hostname]['ansible_' + biface] | default({}) }}"
ipv4: "{{ eth['ipv4'] | default({}) }}"
q: "{{ eth | json_query('ipv4_secondaries[*].address') }}"
sec: "{{ ((q == ands_none) or (q == '')) | ternary([], q) }}"
@@ -27,9 +28,24 @@
- not (alias | default(false))
- not ip_present
-- name: "configure storage network interface '{{ iface }}' to '{{ cidr }}'"
+- name: "create bridge '{{ bridge }}' with cidr '{{ cidr }}'"
+ command: "nmcli connection add type bridge ifname {{ bridge | quote }} con-name {{ name }} ip4 {{ cidr }}"
+ when:
+ - bridge is defined
+ - (conres.rc != 0) or (not (delres | skipped))
+ - (conres.rc != 0) or (not (alias | default(false)))
+
+- name: "connect bridge '{{ bridge }}' to interface '{{ iface }}'"
+ command: "nmcli connection add type bridge-slave ifname {{ iface | quote }} master {{ bridge | quote }}"
+ when:
+ - bridge is defined
+ - (conres.rc != 0) or (not (delres | skipped))
+ - (conres.rc != 0) or (not (alias | default(false)))
+
+- name: "configure network interface '{{ iface }}' to '{{ cidr }}'"
command: "nmcli connection add type infiniband ifname {{ iface | quote }} con-name {{ name }} ip4 {{ cidr }}"
when:
+ - bridge is not defined
- (conres.rc != 0) or (not (delres | skipped))
- (conres.rc != 0) or (not (alias | default(false)))
@@ -41,10 +57,11 @@
- conres.rc == 0
- not ip_present
-
-- name: "add ip alias '{{ cidr }}' to network interface '{{ iface }}'"
- command: "nmcli connection up {{ conres.stdout | quote }}"
+- name: "start connection {{ cname }}"
+ command: "nmcli connection up {{ cname | quote }}"
register: alres
+ vars:
+ cname: "{{ (conres.stdout == '') | ternary(name, conres.stdout) }}"
when:
- not(alres | skipped)
- alres | succeeded
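For the bridged case, the tasks above reduce to an nmcli sequence like the following (interface names and CIDR are hypothetical):

  nmcli connection add type bridge ifname br0 con-name openshift ip4 192.168.26.1/24
  nmcli connection add type bridge-slave ifname eth1 master br0
  nmcli connection up openshift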
diff --git a/roles/openshift_resource/tasks/template.yml b/roles/openshift_resource/tasks/template.yml
index 188599f..3469464 100644
--- a/roles/openshift_resource/tasks/template.yml
+++ b/roles/openshift_resource/tasks/template.yml
@@ -21,10 +21,13 @@
with_sequence: start=0 count="{{resources | default([]) | length}}"
when: ((recreate|default(false)) or (results | changed)) and (results.results[item|int].rc == 0)
- - name: "{{ template }}: Populate resources to {{project}}"
- shell: "oc process -n {{project}} -f '{{ template_path }}/{{template}}' {{ template_args | default('') }} | oc create -n {{project}} -f - {{ create_args | default('') }}"
+# Replace often complains about immutable fields it cannot change. We ignore such failures.
+ - name: "{{ template }}: Populate resources to {{project}} ({{ replace | ternary('replace', 'create') }})"
+ shell: "oc process -n {{project}} -f '{{ template_path }}/{{template}}' {{ template_args | default('') }} | oc {{ replace | ternary('replace', 'create') }} -n {{project}} -f - {{ create_args | default('') }}"
+ register: status
+ failed_when: (status.rc != 0) and not (replace | default(false))
when:
- - (recreate|default(false)) or (results | changed)
+ - (recreate | default(false)) or (replace | default(false)) or (results | changed)
- resources | length > 0
run_once: true
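In replace mode the task is thus equivalent to a pipeline like (project and template path hypothetical):

  oc process -n adei -f "${template_path}/90-mysql.yml" | oc replace -n adei -f -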
diff --git a/setup.sh b/setup.sh
index a5f49b9..57e002c 100755
--- a/setup.sh
+++ b/setup.sh
@@ -62,7 +62,7 @@ case "$action" in
shift
if [[ -n "$1" && ${1:0:1} != "-" ]]; then
- vars="$vars,ands_configure_app=$1"
+ vars="$vars ands_configure_app=$1"
shift
fi
apply playbooks/openshift-setup-apps.yml --extra-vars "$vars" "$@" || exit 1
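The comma-to-space change matters because ansible-playbook parses --extra-vars as a space-separated list of key=value pairs, so the resulting call looks like, e.g.:

  ansible-playbook playbooks/openshift-setup-apps.yml --extra-vars "... ands_configure_app=galera"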
diff --git a/setup/configs/volumes.yml b/setup/configs/volumes.yml
index 020c7d2..e03002c 100644
--- a/setup/configs/volumes.yml
+++ b/setup/configs/volumes.yml
@@ -23,7 +23,7 @@ ands_storage_domains:
volumes:
provision: { type: "cfg", mount: "{{ ands_paths.provision }}" }
- servers: "ands_storage_servers"
- clients: [ "masters", "new_masters" ]
+ clients: [ "storage_nodes", "new_storage_nodes" ]
volumes:
openshift: { type: "cfg", mount: "{{ ands_paths.openshift }}", nfs_clients: "{{ ands_nfs_clients }}" }
databases: { type: "db", mount: "{{ ands_paths.databases }}", access: "ReadOnlyMany" }
@@ -43,7 +43,7 @@ ands_storage_domains:
# - pdv:
ands_local_storage_domains:
- - servers: [ "ands_storage_servers" ]
+ - servers: "ands_storage_servers"
volumes:
hostraid: { type: "host", path: "/mnt/ands/hostmount", mount: "{{ ands_paths.hostraid }}" }
diff --git a/setup/projects/adei/templates/40-mysql-svc.yml.j2 b/setup/projects/adei/templates/40-mysql-svc.yml.j2
new file mode 100644
index 0000000..6263966
--- /dev/null
+++ b/setup/projects/adei/templates/40-mysql-svc.yml.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "mysql-service"
+objects:
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ name: mysql
+ spec:
+ selector:
+ service: mysql
+ ports:
+ - name: "mysql"
+ port: 3306
+ targetPort: 3306
diff --git a/setup/projects/adei/templates/40-mysql-svc.yml.j2~ b/setup/projects/adei/templates/40-mysql-svc.yml.j2~
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/setup/projects/adei/templates/40-mysql-svc.yml.j2~
diff --git a/setup/projects/adei/vars/apps.yml b/setup/projects/adei/vars/apps.yml
new file mode 100644
index 0000000..20cdefe
--- /dev/null
+++ b/setup/projects/adei/vars/apps.yml
@@ -0,0 +1,5 @@
+apps:
+ mysql: { provision: true, instantiate: false }
+ galera: { provision: true, instantiate: false }
+# simple_mysql: { provision: false, instantiate: false }
+ phpmyadmin: { provision: true, instantiate: true }
diff --git a/setup/projects/adei/vars/globals.yml b/setup/projects/adei/vars/globals.yml
index 86911aa..8435926 100644
--- a/setup/projects/adei/vars/globals.yml
+++ b/setup/projects/adei/vars/globals.yml
@@ -5,8 +5,6 @@ adei_pod_history_limit: 2
adei_pod_env:
- name: "HOME"
value: "/tmp"
- - name: "MYSQL_SERVER"
- value: "mysql.adei.svc.cluster.local"
- name: "MYSQL_PORT"
value: "3306"
- name: "MYSQL_USER"
@@ -28,6 +26,8 @@ adei_pod_env:
value: "/adei/src"
adei_prod_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}"
- name: "ADEI_RELEASE"
@@ -42,6 +42,8 @@ adei_prod_env:
value: "${continuous_caching}"
adei_log_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}-logs"
- name: "ADEI_RELEASE"
@@ -54,6 +56,8 @@ adei_log_env:
value: "5"
adei_debug_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}"
- name: "ADEI_RELEASE"
@@ -66,6 +70,8 @@ adei_debug_env:
value: "5"
adei_cron_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql-master.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}"
- name: "ADEI_SCHEDULER"
@@ -76,6 +82,8 @@ adei_update_env:
value: "${continuous_caching}"
adei_cache_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql-master.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}"
- name: "ADEI_URL"
@@ -86,6 +94,8 @@ adei_cache_env:
value: "5"
adei_arc_cache_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql-master.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}"
- name: "ADEI_URL"
@@ -96,6 +106,8 @@ adei_arc_cache_env:
value: "300"
adei_log_cache_env:
+ - name: "MYSQL_SERVER"
+ value: "mysql-master.adei.svc.cluster.local"
- name: "ADEI_SETUP"
value: "${setup}-logs"
- name: "ADEI_URL"
diff --git a/setup/projects/adei/vars/mysql.yml b/setup/projects/adei/vars/mysql.yml
new file mode 100644
index 0000000..3349598
--- /dev/null
+++ b/setup/projects/adei/vars/mysql.yml
@@ -0,0 +1,92 @@
+mysql:
+ options:
+ delete: false
+
+ pods:
+ mysql_master:
+ kind: StatefulSet
+ sa: "adeidb"
+ service: { ports: [ 3306 ] }
+ network: { host: "{{ ands_hostnet_db | default(false) }}" }
+ sched: { replicas: 1, strategy: "Recreate", selector: { hostid: "3" } }
+ groups: [ "adei_db" ]
+ labels: { 'service': 'mysql' }
+ pvc: { 'adei_master': {} }
+ images:
+ - image: "chsa/mysql:5.7"
+ command: [ "run-mysqld-master" ]
+ env:
+ - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/root-password" }
+ - { name: "MYSQL_USER", value: "adei" }
+ - { name: "MYSQL_USER_PRIV_SUPER", value: "1" }
+ - { name: "MYSQL_PASSWORD", value: "secret@adei/adei-password" }
+ - { name: "MYSQL_DATABASE", value: "adei" }
+ - { name: "MYSQL_EXTRADB", value: "adei_%" }
+ - { name: "MYSQL_MASTER_USER", value: "replication" }
+ - { name: "MYSQL_MASTER_PASSWORD", value: "secret@adei/service-password" }
+ - { name: "MYSQL_PMA_PASSWORD", value: "secret@adei/pma-password" }
+ - { name: "MYSQL_MAX_CONNECTIONS", value: "500" }
+ - { name: "MYSQL_SYNC_BINLOG", value: "0" }
+ - { name: "MYSQL_FLUSH_LOG_TYPE", value: "2" }
+ - { name: "MYSQL_FLUSH_LOG_TIMEOUT", value: "300" }
+ - { name: "MYSQL_BINLOG_FORMAT", value: "MIXED" }
+# - { name: "MYSQL_BINLOG_FORMAT", value: "ROW" }
+# - { name: "MYSQL_BINLOG_FORMAT", value: "STATEMENT" }
+ mappings:
+ - { name: "adei_master", mount: "/var/lib/mysql/data" }
+ resources: { request: { cpu: 2000m, mem: 4Gi }, limit: { cpu: 6000m, mem: 32Gi } }
+ probes:
+ - { type: "liveness", port: 3306 }
+ - { type: "readiness", command: [ /bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1' ], delay: "15", timeout: "5" }
+
+ mysql_slave:
+ kind: StatefulSet
+ sa: "adeidb"
+ service: { ports: [ 3306 ] }
+ network: { host: "{{ ands_hostnet_db | default(false) }}" }
+    sched: { replicas: 1, strategy: "Recreate", restrict: { fat_storage: "1" } }
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - { key: "hostid", operator: "In", values: [ "1", "2" ] }
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchExpressions:
+ - { key: name, operator: In, values: [ "mysql-master", "mysql-slave" ] }
+ groups: [ "adei_db" ]
+ labels: { 'service': 'mysql' }
+ pvc: { 'adei_slave': {} }
+ images:
+ - image: "chsa/mysql:5.7"
+ command: [ "run-mysqld-slave" ]
+ env:
+ - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/root-password" }
+ - { name: "MYSQL_USER", value: "adei" }
+ - { name: "MYSQL_USER_PRIV_SUPER", value: "1" }
+ - { name: "MYSQL_PASSWORD", value: "secret@adei/adei-password" }
+ - { name: "MYSQL_DATABASE", value: "adei" }
+ - { name: "MYSQL_EXTRADB", value: "adei_%" }
+ - { name: "MYSQL_MASTER_USER", value: "replication" }
+ - { name: "MYSQL_MASTER_SERVICE_NAME", value: "mysql-master" }
+ - { name: "MYSQL_MASTER_PASSWORD", value: "secret@adei/service-password" }
+ - { name: "MYSQL_PMA_PASSWORD", value: "secret@adei/pma-password" }
+ - { name: "MYSQL_MAX_CONNECTIONS", value: "500" }
+ - { name: "MYSQL_SYNC_BINLOG", value: "0" }
+ - { name: "MYSQL_FLUSH_LOG_TYPE", value: "2" }
+ - { name: "MYSQL_FLUSH_LOG_TIMEOUT", value: "300" }
+ - { name: "MYSQL_SLAVE_WORKERS", value: "8" }
+ - { name: "MYSQL_SLAVE_SKIP_ERRORS", value: "1007,1008,1050,1051,1054,1060,1061,1068,1094,1146,1304,1359,1476,1537" }
+ - { name: "MYSQL_BINLOG_FORMAT", value: "MIXED" }
+ mappings:
+ - { name: "adei_slave", mount: "/var/lib/mysql/data" }
+# - { name: "adei_init", mount: "/var/lib/init" }
+ resources: { request: { cpu: 2000m, mem: 4Gi }, limit: { cpu: 6000m, mem: 32Gi } }
+ probes:
+ - { type: "liveness", port: 3306 }
+ - { type: "readiness", command: [ /bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1' ], delay: "15", timeout: "5" }
+# hooks:
+# - { type: "postStart", command: [ "/bin/bash", "/var/lib/init/mysql/initdb.sh" ] }
diff --git a/setup/projects/adei/vars/galera.yml b/setup/projects/adei/vars/mysql_galera.yml
index ea64daa..e986268 100644
--- a/setup/projects/adei/vars/galera.yml
+++ b/setup/projects/adei/vars/mysql_galera.yml
@@ -1,26 +1,35 @@
-galera_app:
- name: galera
- provision: true
- instantiate: false
+# We need to set the cluster name in the environment if we configure host networking...
+galera:
+ options:
+ delete: false
pods:
galera:
kind: StatefulSet
- service: { ports: [ 3306 ] }
+ sa: "adeidb"
+ service: { headless: true, ports: [ 3306 ] }
+ network: { host: "{{ ands_hostnet_db | default(false) }}" }
sched: { replicas: 3, strategy: "Recreate", restrict: { fat_storage: "1" } }
- update: { strategy: RollingUpdate, min_ready: 30 }
+# update: { strategy: RollingUpdate, min_ready: 30 }
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- { key: "hostid", operator: "In", values: [ "1", "2", "3" ] }
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchExpressions:
+ - { key: name, operator: In, values: [ "galera" ] }
groups: [ "adei_db" ]
+ pvc: { 'adei_galera': {} }
images:
- image: "chsa/mysql-galera:5.7"
command: [ "run-mysqld-galera" ]
ports: [ 3306, 4444, 4567, 4568 ]
env:
- - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/adei-password" }
+ - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/root-password" }
- { name: "MYSQL_USER", value: "adei" }
- { name: "MYSQL_USER_PRIV_SUPER", value: "1" }
- { name: "MYSQL_PASSWORD", value: "secret@adei/adei-password" }
@@ -28,13 +37,13 @@ galera_app:
- { name: "MYSQL_EXTRADB", value: "adei_%" }
- { name: "MYSQL_GALERA_USER", value: "xtrabackup_sst" }
- { name: "MYSQL_GALERA_PASSWORD", value: "secret@adei/service-password" }
+ - { name: "MYSQL_GALERA_CLUSTER", value: "galera-ss" }
mappings:
- - { name: "adei_init", mount: "/var/lib/init" }
- - { name: "adei_host", path: "galera", mount: "/var/lib/mysql/data" }
+ - { name: "adei_galera", mount: "/var/lib/mysql/data" }
resources: { request: { cpu: 2000m, mem: 4Gi }, limit: { cpu: 6000m, mem: 32Gi } }
-# probes:
-# - { type: "liveness", port: 3306 }
-# - { type: "readiness", command: [ /bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE, -e 'SELECT 1' ], delay: "15", timeout: "5" }
+ probes:
+ - { type: "liveness", port: 3306 }
+ - { type: "readiness", command: [ /bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1' ], delay: "15", timeout: "5" }
grecovery:
diff --git a/setup/projects/adei/vars/mysql_simple.yml b/setup/projects/adei/vars/mysql_simple.yml
new file mode 100644
index 0000000..d58a5fe
--- /dev/null
+++ b/setup/projects/adei/vars/mysql_simple.yml
@@ -0,0 +1,26 @@
+simple_mysql:
+ pods:
+ mysql:
+ service: { ports: [ 3306 ] }
+ sched: { replicas: 1, strategy: "Recreate", selector: { hostid: "3" } }
+ groups: [ "adei_db" ]
+ images:
+ - image: "centos/mysql-57-centos7"
+ env:
+ - { name: "MYSQL_USER", value: "adei" }
+ - { name: "MYSQL_PASSWORD", value: "secret@adei/adei-password" }
+ - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/root-password" }
+ - { name: "MYSQL_DATABASE", value: "adei" }
+ - { name: "MYSQL_PMA_PASSWORD", value: "secret@adei/pma-password" }
+ - { name: "MYSQL_MAX_CONNECTIONS", value: "500" }
+ mappings:
+ - { name: "adei_init", mount: "/var/lib/init" }
+ - { name: "adei_host", path: "mysql", mount: "/var/lib/mysql/data" }
+# - { name: "adei_db", path: "mysql", mount: "/var/lib/mysql/data" }
+ resources: { request: { cpu: 2000m, mem: 4Gi }, limit: { cpu: 6000m, mem: 32Gi } }
+ probes:
+ - { port: 3306 }
+# - { type: "liveness", port: 3306 }
+# - { type: "readiness", command: [/bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE, -e 'SELECT 1'] }
+ hooks:
+ - { type: "postStart", command: [ "/bin/bash", "/var/lib/init/mysql/initdb.sh" ] }
diff --git a/setup/projects/adei/vars/phpmyadmin.yml b/setup/projects/adei/vars/phpmyadmin.yml
new file mode 100644
index 0000000..63bd5d8
--- /dev/null
+++ b/setup/projects/adei/vars/phpmyadmin.yml
@@ -0,0 +1,16 @@
+phpmyadmin:
+ pods:
+ phpmyadmin:
+ service: { host: "phpmyadmin.{{ openshift_master_default_subdomain }}", ports: [ 80/8080 ] }
+ sched: { replicas: 1 }
+ images:
+ - image: "chsa/phpmyadmin-centos:4"
+ env:
+ - { name: "DB_SERVICE_HOST", value: "mysql.adei.svc.cluster.local" }
+ - { name: "DB_SERVICE_PORT", value: "3306" }
+ - { name: "DB_EXTRA_HOSTS", value: "mysql-master.adei.svc.cluster.local,mysql-slave.adei.svc.cluster.local,mysql.katrin.svc.cluster.local,galera.adei.svc.cluster.local" }
+# - { name: "DB_SERVICE_CONTROL_USER", value: "pma" }
+# - { name: "DB_SERVICE_CONTROL_PASSWORD", value: "secret@adei/pma-password" }
+ probes:
+ - { port: 8080, path: '/' }
+ 
\ No newline at end of file
diff --git a/setup/projects/adei/vars/pods.yml b/setup/projects/adei/vars/pods.yml
deleted file mode 100644
index 8857fcd..0000000
--- a/setup/projects/adei/vars/pods.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-pods:
- mysql:
- service: { ports: [ 3306 ] }
- sched: { replicas: 1, strategy: "Recreate", selector: { hostid: "3" } }
- groups: [ "adei_db" ]
- images:
- - image: "centos/mysql-57-centos7"
- env:
- - { name: "MYSQL_USER", value: "adei" }
- - { name: "MYSQL_PASSWORD", value: "secret@adei/adei-password" }
- - { name: "MYSQL_ROOT_PASSWORD", value: "secret@adei/root-password" }
- - { name: "MYSQL_DATABASE", value: "adei" }
- - { name: "MYSQL_PMA_PASSWORD", value: "secret@adei/pma-password" }
- - { name: "MYSQL_MAX_CONNECTIONS", value: "500" }
- mappings:
- - { name: "adei_init", mount: "/var/lib/init" }
- - { name: "adei_host", path: "mysql", mount: "/var/lib/mysql/data" }
-# - { name: "adei_db", path: "mysql", mount: "/var/lib/mysql/data" }
- resources: { request: { cpu: 2000m, mem: 4Gi }, limit: { cpu: 6000m, mem: 32Gi } }
- probes:
- - { port: 3306 }
-# - { type: "liveness", port: 3306 }
-# - { type: "readiness", command: [/bin/sh, -i, -c, MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE, -e 'SELECT 1'] }
- hooks:
- - { type: "postStart", command: [ "/bin/bash", "/var/lib/init/mysql/initdb.sh" ] }
-
- phpmyadmin:
- service: { host: "phpmyadmin.{{ openshift_master_default_subdomain }}", ports: [ 80/8080 ] }
- sched: { replicas: 1 }
- images:
- - image: "chsa/phpmyadmin-centos:4"
- env:
- - { name: "DB_SERVICE_HOST", value: "mysql.adei.svc.cluster.local" }
- - { name: "DB_SERVICE_PORT", value: "3306" }
- - { name: "DB_EXTRA_HOSTS", value: "mysql.katrin.svc.cluster.local" }
-# - { name: "DB_SERVICE_CONTROL_USER", value: "pma" }
-# - { name: "DB_SERVICE_CONTROL_PASSWORD", value: "secret@adei/pma-password" }
- probes:
- - { port: 8080, path: '/' }
-
-
-
-apps:
- - "galera_app"
-
-
-
-
-#oc:
-# - template: "[0-3]*"
-# - template: "[4-6]*"
-# - resource: "route/apache"
-# oc: "expose svc/kaas --name apache --hostname=apache.{{ openshift_master_default_subdomain }}"
-# - template: "*"
- 
\ No newline at end of file
diff --git a/setup/projects/adei/vars/script.yml b/setup/projects/adei/vars/script.yml
new file mode 100644
index 0000000..cbd01ba
--- /dev/null
+++ b/setup/projects/adei/vars/script.yml
@@ -0,0 +1,8 @@
+oc:
+ - storage: ".*"
+ - keys: ".*"
+ - oc: "create sa adeidb"
+ resource: "sa/adeidb"
+ - oc: "{{ ands_hostnet_db | default(false) | ternary('adm policy add-scc-to-user hostnetwork -z adeidb', 'adm policy remove-scc-from-user hostnetwork -z adeidb') }}"
+ - templates: "*"
+ - apps: ".*"
diff --git a/setup/projects/adei/vars/volumes.yml b/setup/projects/adei/vars/volumes.yml
index 82f2e18..fdceaae 100644
--- a/setup/projects/adei/vars/volumes.yml
+++ b/setup/projects/adei/vars/volumes.yml
@@ -13,13 +13,21 @@ volumes:
adei_log: { volume: "temporary", path: "/adei/log", write: true } # per-replica (should be fine) temporary files
# adei_db: { volume: "databases", path: "/adei", write: true } # mysql
+# This is not part of 'volumes'; the permissions should always be provisioned using files on the adei_host 'osv'
+local_volumes:
+ adei_master: { volume: "hostraid", path: "/adei/mysql_master", nodes: [3], write: true }
+ adei_slave: { volume: "hostraid", path: "/adei/mysql_slave", nodes: [1, 2], write: true }
+ adei_galera: { volume: "hostraid", path: "/adei/galera", write: true }
+
files:
- - { osv: "adei_cfg", path: "/", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_src", path: "/", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_src", path: "/prod", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_src", path: "/dbg", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_log", path: "/", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_tmp", path: "/", state: "directory", group: "adei", mode: "02775" }
- - { osv: "adei_host",path: "mysql", state: "directory", group: "adei_db", mode: "02775" }
- - { osv: "adei_host",path: "galera", state: "directory", group: "adei_db", mode: "02775" }
+ - { osv: "adei_cfg", path: "/", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_src", path: "/", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_src", path: "/prod", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_src", path: "/dbg", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_log", path: "/", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_tmp", path: "/", state: "directory", group: "adei", mode: "02775" }
+ - { osv: "adei_host",path: "mysql", state: "directory", group: "adei_db", mode: "02775" }
+ - { osv: "adei_host",path: "galera", state: "directory", group: "adei_db", mode: "02775" }
+ - { osv: "adei_host",path: "mysql_master", state: "directory", group: "adei_db", mode: "02775" }
+ - { osv: "adei_host",path: "mysql_slave", state: "directory", group: "adei_db", mode: "02775" }
# - { osv: "adei_db", path: "mysql", state: "directory", group: "adei_db", mode: "02775" }