Deploy Kubernetes: Kubespray (tested with v2.16.0)
Clone the Kubespray repository and check out the tested release:
git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
git checkout tags/v2.16.0
Next, add the pre-configured settings to the Kubespray repository: copy the inventory/k8s folder from the Ansible project into the repository (see the sketch below).
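A sketch of that copy, with hypothetical paths; if you do not have a pre-configured folder, Kubespray's bundled sample inventory is the usual starting point:
cp -r ../ansible/inventory/k8s kubespray/inventory/k8s
# or, starting from Kubespray's sample:
cp -rfp kubespray/inventory/sample kubespray/inventory/k8s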
Edit the inventory/k8s/hosts.yaml file to replace the node and bastion IP addresses with your own.
Display the IPs of your cluster:
openstack server list -c Name -c Networks --sort-column Name -f value
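A hypothetical helper that turns that output into name/IP pairs ready to paste into hosts.yaml (it assumes one network per server, so the Networks column looks like private=10.11.12.163):
openstack server list -c Name -c Networks --sort-column Name -f value | awk '{gsub(/.*=/, "", $2); print $1": "$2}'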
Sample hosts.yaml:
all:
  hosts:
    k8s-master-0:
      ansible_host: 10.11.12.163
      ansible_user: ubuntu
      ip: 10.11.12.163
    k8s-master-1:
      ansible_host: 10.11.12.103
      ansible_user: ubuntu
      ip: 10.11.12.103
    k8s-master-2:
      ansible_host: 10.11.12.94
      ansible_user: ubuntu
      ip: 10.11.12.94
    k8s-worker-0:
      ansible_host: 10.11.12.73
      ansible_user: ubuntu
      ip: 10.11.12.73
    k8s-worker-1:
      ansible_host: 10.11.12.180
      ansible_user: ubuntu
      ip: 10.11.12.180
  children:
    kube_control_plane:
      hosts:
        k8s-master-0:
        k8s-master-1:
        k8s-master-2:
    kube_node:
      hosts:
        k8s-master-0:
        k8s-master-1:
        k8s-master-2:
        k8s-worker-0:
        k8s-worker-1:
    etcd:
      hosts:
        k8s-master-0:
        k8s-master-1:
        k8s-master-2:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
    bastion:
      hosts:
        bastion:
          ansible_host: 195.15.244.254
          ansible_user: ubuntu
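Before launching the deployment, you can sanity-check that Ansible reaches every host. A minimal sketch, assuming SSH through the bastion already works (Kubespray generates ssh-bastion.conf during the first playbook run, so this ping may only succeed once that file exists):
ansible -i inventory/k8s/hosts.yaml all -m ping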
Edit the inventory/k8s/group_vars/all/all.yaml file to replace the Kubernetes API load balancer IP address in loadbalancer_apiserver. Display the load balancer VIP:
openstack loadbalancer show -c vip_address k8s-api -f value
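For reference, the relevant block of all.yaml looks roughly like this; the address below is illustrative and should be the VIP printed by the command above:
loadbalancer_apiserver:
  address: 10.11.12.34
  port: 6443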
Also note the load balancer's public (floating) IP; the API will be reached on this address from outside the private network. Display it with:
openstack floating ip list --port $(openstack loadbalancer show k8s-api -c vip_port_id -f value) -c "Floating IP Address" -f value
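If you will reach the API over this floating IP, it also has to appear in the API server's TLS certificate; Kubespray exposes supplementary_addresses_in_ssl_keys in the k8s_cluster group vars for that. A sketch with a placeholder address:
supplementary_addresses_in_ssl_keys: [X.X.X.X]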
We can now install Kubernetes with the Kubespray playbook:
ansible-playbook -i inventory/k8s/hosts.yaml --become --become-user=root cluster.yml
Once the playbook completes, Kubespray writes the cluster admin kubeconfig to inventory/k8s/artifacts/admin.conf. Edit it to replace the API server's private address with the public one (see the sketch below).
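A minimal sketch of that edit, assuming the cluster entry is named cluster.local (the Kubespray default) and X.X.X.X is the floating IP retrieved earlier:
kubectl --kubeconfig inventory/k8s/artifacts/admin.conf config set-cluster cluster.local --server=https://X.X.X.X:6443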
Find the generated SSH config for cluster access:
cat kubespray/ssh-bastion.conf
Host X.X.X.X
  Hostname X.X.X.X
  StrictHostKeyChecking no
  ControlMaster auto
  ControlPath ~/.ssh/ansible-%r@%h:%p
  ControlPersist 5m

Host 10.11.12.163 10.11.12.103 10.11.12.94 10.11.12.73 10.11.12.180
  ProxyCommand ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -p 22 ubuntu@195.15.244.254
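With this file you can hop onto any node through the bastion, for example:
ssh -F kubespray/ssh-bastion.conf ubuntu@10.11.12.163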
Export the cluster's admin kubeconfig so you can interact with it:
export KUBECONFIG=/Users/leopoldjacquot/code/kubespray/inventory/k8s/artifacts/admin.conf
kubectl get nodes
Check networking. To get the most recent cluster-wide network connectivity report, run the following from any of the cluster nodes:
curl http://localhost:31081/api/v1/connectivity_check
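If you are not logged into a node, the same check can be run through the bastion SSH config from the previous step:
ssh -F kubespray/ssh-bastion.conf ubuntu@10.11.12.163 curl -s http://localhost:31081/api/v1/connectivity_check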
You can list the pods running on your cluster:
kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   cilium-2zrbm                               1/1     Running   0          2d16h
kube-system   cilium-6t62k                               1/1     Running   0          2d16h
kube-system   cilium-9jln6                               1/1     Running   0          2d16h
kube-system   cilium-btvdw                               1/1     Running   0          2d16h
kube-system   cilium-n7vrp                               1/1     Running   0          2d16h
kube-system   cilium-operator-68ff55c94b-wfkw4           1/1     Running   0          2d16h
kube-system   kube-apiserver-k8s-master-0                1/1     Running   0          2d16h
kube-system   kube-apiserver-k8s-master-1                1/1     Running   0          2d16h
kube-system   kube-apiserver-k8s-master-2                1/1     Running   0          2d16h
kube-system   kube-controller-manager-k8s-master-0       1/1     Running   1          2d16h
kube-system   kube-controller-manager-k8s-master-1       1/1     Running   0          2d16h
kube-system   kube-controller-manager-k8s-master-2       1/1     Running   0          2d16h
kube-system   kube-proxy-6njkh                           1/1     Running   0          2d16h
kube-system   kube-proxy-dmw54                           1/1     Running   0          2d16h
kube-system   kube-proxy-g5tdj                           1/1     Running   0          2d16h
kube-system   kube-proxy-gbvjn                           1/1     Running   0          2d16h
kube-system   kube-proxy-pf56h                           1/1     Running   0          2d16h
kube-system   kube-scheduler-k8s-master-0                1/1     Running   0          2d16h
kube-system   kube-scheduler-k8s-master-1                1/1     Running   0          2d16h
kube-system   kube-scheduler-k8s-master-2                1/1     Running   0          2d16h
To query the API directly, create an admin ServiceAccount and bind it to the cluster-admin role. Save the ServiceAccount as admin_account.yaml and the ClusterRoleBinding as admin_role.yaml (both are shown together here, separated by ---):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin  # or another name
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-rbac
subjects:
- kind: ServiceAccount
  # Reference to the ServiceAccount's `metadata.name`
  name: admin
  # Reference to the ServiceAccount's `metadata.namespace`
  namespace: default
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f admin_account.yaml
kubectl apply -f admin_role.yaml
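Before calling the API, you can confirm the binding took effect; kubectl can evaluate permissions on behalf of the service account (expected answer: yes):
kubectl auth can-i '*' '*' --as=system:serviceaccount:default:admin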
Retrieve the API server URL and the admin service account's token:
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
TOKEN=$(kubectl get secret $(kubectl get serviceaccount admin -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
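Note that this secret-based lookup works on the Kubernetes versions Kubespray v2.16 deploys; on clusters running Kubernetes 1.24 or later, token secrets are no longer created automatically for service accounts, and you would request a short-lived token instead:
TOKEN=$(kubectl create token admin)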
Communicate with the Kubernetes API; for example, list the namespaces:
curl -X GET $APISERVER/api/v1/namespaces/ --header "Authorization: Bearer $TOKEN" --insecure
{
  "kind": "NamespaceList",
  "apiVersion": "v1",
  "metadata": {
    "resourceVersion": "772347"
  },
  "items": [
    {
      "metadata": {
        "name": "default",
        "uid": "ab798fdb-ea43-42b7-bdbb-a049ec3673b6",
        "resourceVersion": "199",
        "creationTimestamp": "2021-10-10T16:15:58Z",
        "managedFields": [
          {
            "manager": "kube-apiserver",
            "operation": "Update",
            "apiVersion": "v1",
            "time": "2021-10-10T16:15:58Z",
            "fieldsType": "FieldsV1",
            "fieldsV1": {"f:status":{"f:phase":{}}}
          }
        ]
      },
      "spec": {
        "finalizers": [
          "kubernetes"
        ]
      },
      "status": {
        "phase": "Active"
      }
    },
    {
      "metadata": {
        "name": "kube-node-lease",
        "uid": "9b5a0397-a59a-47b5-9637-11c0497f179c",
        "resourceVersion": "6",
        "creationTimestamp": "2021-10-10T16:15:56Z",
        "managedFields": [
          {
            "manager": "kube-apiserver",
            "operation": "Update",
            "apiVersion": "v1",
            "time": "2021-10-10T16:15:56Z",
            "fieldsType": "FieldsV1",
            "fieldsV1": {"f:status":{"f:phase":{}}}
          }
        ]
      },
      "spec": {
        "finalizers": [
          "kubernetes"
        ]
      },
      "status": {
        "phase": "Active"
      }
    },
    {
      "metadata": {
        "name": "kube-public",
        "uid": "06942f2a-ce84-45da-a661-c5fce770a8c0",
        "resourceVersion": "5",
        "creationTimestamp": "2021-10-10T16:15:56Z",
        "managedFields": [
          {
            "manager": "kube-apiserver",
            "operation": "Update",
            "apiVersion": "v1",
            "time": "2021-10-10T16:15:56Z",
            "fieldsType": "FieldsV1",
            "fieldsV1": {"f:status":{"f:phase":{}}}
          }
        ]
      },
      "spec": {
        "finalizers": [
          "kubernetes"
        ]
      },
      "status": {
        "phase": "Active"
      }
    },
    {
      "metadata": {
        "name": "kube-system",
        "uid": "c3536096-9c88-408d-ae57-2ceb39fad7e2",
        "resourceVersion": "4",
        "creationTimestamp": "2021-10-10T16:15:56Z",
        "managedFields": [
          {
            "manager": "kube-apiserver",
            "operation": "Update",
            "apiVersion": "v1",
            "time": "2021-10-10T16:15:56Z",
            "fieldsType": "FieldsV1",
            "fieldsV1": {"f:status":{"f:phase":{}}}
          }
        ]
      },
      "spec": {
        "finalizers": [
          "kubernetes"
        ]
      },
      "status": {
        "phase": "Active"
      }
    }
  ]
}
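The same pattern works for any other API path; for example, listing pods across all namespaces:
curl -X GET $APISERVER/api/v1/pods --header "Authorization: Bearer $TOKEN" --insecure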