For the purpose of orbit-lab, execute the script below on any of the console nodes in ORBIT:
./create-kube-cluster.sh -c "cNode1 cNode2 ... cNodeM" -w "wNode1 wNode2 ... wNodeN"

create-kube-cluster.sh
{{{
#!/bin/bash

rm -f cluster.yml
rm -f rke
rm -f config

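# Download the RKE v0.2.1 CLI and make it executable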
wget https://github.com/rancher/rke/releases/download/v0.2.1/rke_linux-amd64
mv rke_linux-amd64 rke
chmod 754 rke

usage () {
    echo "Usage:"
    echo "  ./$(basename $0) -c \"cNode1 cNode2 ... cNodeM\" -w \"wNode1 wNode2 ... wNodeN\""
    echo "Note: the controller and worker hostname lists must each be enclosed in \"\""
    exit 0
}

if [[ "$1" == "--help" || "$1" == "-h" ]]
then
    usage
fi

if [ "$#" -lt 4 ]; then
    echo "Missing Kubernetes control and worker nodes"
    usage
fi

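# Start a fresh cluster.yml; RKE reads the node inventory from the top-level "nodes:" list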
echo "# An example of an HA Kubernetes cluster for ONAP" >> cluster.yml
echo "nodes:" >> cluster.yml

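# Parse the -c (controller hostnames) and -w (worker hostnames) options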
while getopts c:w: option
do
    case "${option}" in
        c) CONTROLLERS=${OPTARG};;
        w) WORKERS=${OPTARG};;
    esac
done

#omf load -i latest-onap-control.ndz -t ${CONTROLLERS// /,}
#omf load -i latest-onap-worker.ndz -t ${WORKERS// /,}
#omf tell -a on -t ${CONTROLLERS// /,},${WORKERS// /,}

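# Split the quoted, space-separated hostname lists into bash arrays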
IFS=' ' read -ra C <<< "$CONTROLLERS"
IFS=' ' read -ra W <<< "$WORKERS"

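# Wait until every node answers ping, then push a minimal /etc/hosts
# (localhost plus the node's own IP and hostname) to it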
echo "Testing node availability. This might take some time"
for i in "${C[@]}"; do
    while ! ping -c 1 -n -w 1 $i &> /dev/null
    do
        printf "%c" "."
    done
    echo "127.0.0.1 localhost" > hosts
    echo "`ping $i -c 1 | grep "PING" | grep '(' | awk '{gsub(/[()]/,""); print $3}'` ${i}" >> hosts
    scp hosts root@$i:/etc/hosts
done

for i in "${W[@]}"; do
    while ! ping -c 1 -n -w 1 $i &> /dev/null
    do
        printf "%c" "."
    done
    echo "127.0.0.1 localhost" > hosts
    echo "`ping $i -c 1 | grep "PING" | grep '(' | awk '{gsub(/[()]/,""); print $3}'` ${i}" >> hosts
    scp hosts root@$i:/etc/hosts
done
echo "Availability check successful"

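# One RKE node entry per controller: control plane and etcd roles, SSH as root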
for i in "${C[@]}"; do
    echo "- address: `ping $i -c 1 | grep "PING" | grep '(' | awk '{gsub(/[()]/,""); print $3}'`" >> cluster.yml
    echo '  port: "22"' >> cluster.yml
    echo "  role:" >> cluster.yml
    echo "  - controlplane" >> cluster.yml
    echo "  - etcd" >> cluster.yml
    echo "  hostname_override: `ping $i -c 1 | grep 'PING' | awk '{print $2}' | awk -F . '{print $1}'`" >> cluster.yml
    echo "  user: root" >> cluster.yml
    echo "  ssh_key_path: '~/.ssh/id_rsa'" >> cluster.yml
done

echo "# worker nodes start " >> cluster.yml

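# One RKE node entry per worker: worker role only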
for i in "${W[@]}"; do
    echo "- address: `ping $i -c 1 | grep "PING" | grep '(' | awk '{gsub(/[()]/,""); print $3}'`" >> cluster.yml
    echo '  port: "22"' >> cluster.yml
    echo "  role:" >> cluster.yml
    echo "  - worker" >> cluster.yml
    echo "  hostname_override: `ping $i -c 1 | grep 'PING' | awk '{print $2}' | awk -F . '{print $1}'`" >> cluster.yml
    echo "  user: root" >> cluster.yml
    echo "  ssh_key_path: '~/.ssh/id_rsa'" >> cluster.yml
done

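# Append the static part of the RKE config: service/pod CIDRs, canal CNI,
# x509 authentication, RBAC, Kubernetes v1.13.5-rancher1-2 and the ONAP Nexus registry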
echo 'services:
  kube-api:
    service_cluster_ip_range: 10.43.0.0/16
    pod_security_policy: false
    always_pull_images: false
  kube-controller:
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  kubelet:
    cluster_domain: cluster.local
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
network:
  plugin: canal
authentication:
  strategy: x509
ssh_key_path: "~/.ssh/id_rsa"
ssh_agent_auth: false
authorization:
  mode: rbac
ignore_docker_version: false
kubernetes_version: "v1.13.5-rancher1-2"
private_registries:
- url: nexus3.onap.org:10001
  user: docker
  password: docker
  is_default: true
cluster_name: "onap"
restore:
  restore: false
  snapshot_name: ""' >> cluster.yml

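# Provision the cluster; on success rke writes the kubeconfig to ./kube_config_cluster.yml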
./rke up

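# Copy the generated kubeconfig to every controller so kubectl can be used there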
for i in "${C[@]}"; do
    ssh root@$i "mkdir -p ~/.kube"   # make sure the target directory exists before copying
    scp kube_config_cluster.yml root@$i:~/.kube/config
done

exit 0
}}}
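As an illustration only (the node names below are hypothetical; substitute the hostnames of the nodes you have reserved), a run with two controller nodes and two worker nodes looks like the sketch below. The final check assumes kubectl is installed on the controller; the script has already copied the kubeconfig to ~/.kube/config there.
{{{
# Hypothetical example: two controllers and two workers
./create-kube-cluster.sh -c "node1-1 node1-2" -w "node1-3 node1-4"

# Once ./rke up has finished, verify the cluster from any controller node
ssh root@node1-1 kubectl get nodes
}}}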