3. Helm Installation

Install Helm and set up Tiller with a dedicated service account (the RBAC setup is described at https://github.com/kubernetes/helm/blob/master/docs/rbac.md)

$ curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
$ kubectl create serviceaccount tiller --namespace kube-system
$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
$ helm init --wait --service-account tiller
$ helm repo update
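
As a quick sanity check, you can verify that Tiller is up and reachable before installing any charts (the Tiller pod carries the app=helm label):

$ helm version
$ kubectl get pods --namespace kube-system -l app=helm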

Install Traefik - a modern HTTP reverse proxy and load balancer

$ helm install stable/traefik --wait --name my-traefik --namespace kube-system --set serviceType=NodePort,dashboard.enabled=true,accessLogs.enabled=true,rbac.enabled=true,metrics.prometheus.enabled=true
$ kubectl describe svc my-traefik --namespace kube-system
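
The Ingress objects created later in this section (Prometheus, Grafana, the Kubernetes Dashboard) are served by this Traefik instance. As a minimal sketch, an Ingress routed through Traefik could look like the following, assuming a hypothetical Service named my-app listening on port 80 and the hostname my-app.domain.com:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-app
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: my-app.domain.com
    http:
      paths:
      - path: /
        backend:
          serviceName: my-app
          servicePort: 80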

Install Rook - File, Block, and Object Storage Services for your Cloud-Native Environment

$ helm repo add rook-stable https://charts.rook.io/stable
$ helm install --wait --name rook-ceph --namespace rook-ceph-system rook-stable/rook-ceph --version v0.9.3
$ sleep 60
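
Optionally verify that the Rook operator and agent pods are running before creating the cluster:

$ kubectl get pods --namespace rook-ceph-system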

Create your Rook cluster

$ kubectl create -f https://raw.githubusercontent.com/rook/rook/release-0.9/cluster/examples/kubernetes/ceph/cluster.yaml
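
It takes a few minutes for the monitor, manager, and OSD pods to start; you can watch their progress with:

$ kubectl get pods --namespace rook-ceph -w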

Run the Rook toolbox pod, which provides the ceph command-line tools

$ kubectl create -f https://raw.githubusercontent.com/rook/rook/release-0.9/cluster/examples/kubernetes/ceph/toolbox.yaml
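
If you prefer an interactive session instead of the one-off commands used below, you can open a shell in the toolbox pod:

$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath="{.items[0].metadata.name}") -- bash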

Create a storage class based on the Ceph RBD volume plugin

$ kubectl create -f https://raw.githubusercontent.com/rook/rook/release-0.9/cluster/examples/kubernetes/ceph/storageclass.yaml
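
The manifest above creates a storage class named rook-ceph-block, which is referenced by the PersistentVolumeClaim later in this section. You can confirm it exists with:

$ kubectl get storageclass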

Create a shared file system which can be mounted read-write from multiple pods

$ kubectl create -f https://raw.githubusercontent.com/rook/rook/release-0.9/cluster/examples/kubernetes/ceph/filesystem.yaml
$ sleep 150
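
The filesystem components take a while to start (hence the sleep above). You can check that the CephFilesystem resource was created with:

$ kubectl get cephfilesystem --namespace rook-ceph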

Check the status of your Ceph installation

$ export ROOK_CEPH_TOOLS_POD=$(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath="{.items[0].metadata.name}")
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph status
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd status

Check health detail of Ceph cluster

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph health detail

Check monitor quorum status of Ceph

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph quorum_status --format json-pretty

Dump monitoring information from Ceph

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph mon dump

Check the cluster usage status

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph df

Check OSD usage of Ceph

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd df

Check the Ceph monitor, OSD, pool, and placement group stats

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph mon stat
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd stat
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd pool stats
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph pg stat

List the Ceph pools in detail

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd pool ls detail

Check the CRUSH map view of OSDs

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd tree

List the cluster authentication keys

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph auth list

Change the replica count (size) of the “replicapool” pool from 1 to 3

$ kubectl get cephblockpool --namespace=rook-ceph replicapool -o yaml | sed "s/size: 1/size: 3/" | kubectl replace -f -
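
You can confirm the new replica count directly from the toolbox:

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd pool get replicapool size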

List details for “replicapool”

$ kubectl describe cephblockpool --namespace=rook-ceph replicapool

Write a manifest for a test Job and PersistentVolumeClaim that use the rook-ceph-block storage class

$ tee files/rook-ceph-test-job.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rook-ceph-test-pv-claim
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: batch/v1
kind: Job
metadata:
  name: rook-ceph-test
  labels:
    app: rook-ceph-test
spec:
  template:
    metadata:
      labels:
        app: rook-ceph-test
    spec:
      containers:
      - name: rook-ceph-test
        image: busybox
        command: [ 'dd', 'if=/dev/zero', 'of=/data/zero_file', 'bs=1M', 'count=100' ]
        volumeMounts:
          - name: rook-ceph-test
            mountPath: "/data"
      restartPolicy: Never
      volumes:
      - name: rook-ceph-test
        persistentVolumeClaim:
          claimName: rook-ceph-test-pv-claim
EOF

Check the Ceph usage before applying the manifest

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd status
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph df
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd df

Apply the manifest

$ kubectl apply -f files/rook-ceph-test-job.yaml
$ sleep 10

Check the Ceph usage again

$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd status
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph df
$ kubectl -n rook-ceph exec ${ROOK_CEPH_TOOLS_POD} -- ceph osd df

List the Persistent Volume Claims

$ kubectl get pvc
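
The claim should be Bound; the backing PersistentVolume provisioned by Rook can be listed as well:

$ kubectl get pv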

Delete the job

$ kubectl delete job rook-ceph-test
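
If you also want to release the Ceph block device created for the test, delete the claim as well (optional cleanup):

$ kubectl delete pvc rook-ceph-test-pv-claim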

Install Prometheus - Prometheus Operator creates/configures/manages Prometheus clusters atop Kubernetes

$ helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/
$ helm install coreos/prometheus-operator --wait --name my-prometheus-operator --namespace monitoring
$ helm install coreos/kube-prometheus --name my-kube-prometheus --namespace monitoring --set alertmanager.ingress.enabled=true,alertmanager.ingress.hosts[0]=alertmanager.domain.com,alertmanager.storageSpec.volumeClaimTemplate.spec.storageClassName=rook-ceph-block,alertmanager.storageSpec.volumeClaimTemplate.spec.accessModes[0]=ReadWriteOnce,alertmanager.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=20Gi,grafana.adminPassword=admin123,grafana.ingress.enabled=true,grafana.ingress.hosts[0]=grafana.domain.com,prometheus.ingress.enabled=true,prometheus.ingress.hosts[0]=prometheus.domain.com,prometheus.storageSpec.volumeClaimTemplate.spec.storageClassName=rook-ceph-block,prometheus.storageSpec.volumeClaimTemplate.spec.accessModes[0]=ReadWriteOnce,prometheus.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=20Gi
$ GRAFANA_PASSWORD=$(kubectl get secret --namespace monitoring my-kube-prometheus-grafana -o jsonpath="{.data.password}" | base64 --decode ; echo)
$ echo "Grafana login: admin / $GRAFANA_PASSWORD"

Install Heapster - Compute Resource Usage Analysis and Monitoring of Container Clusters

$ helm install stable/heapster --name my-heapster --set rbac.create=true
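
Once Heapster has collected a few metric scrapes (this may take a minute or two), resource usage should be available via kubectl:

$ kubectl top nodes
$ kubectl top pods --all-namespaces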

Install Kubernetes Dashboard - General-purpose web UI for Kubernetes clusters

$ helm install stable/kubernetes-dashboard --name=my-kubernetes-dashboard --namespace monitoring --set ingress.enabled=true,rbac.clusterAdminRole=true
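
Check that the dashboard pod is running and its Ingress was created (assuming the chart's default app=kubernetes-dashboard label):

$ kubectl get pods,ingress --namespace monitoring -l "app=kubernetes-dashboard"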