Kubernetes cluster with NFS storage

To facilitate rapid deployment for application testing, R&D, or evaluating features such as KubeVirt, I have developed a streamlined Kubernetes cluster setup integrated with an NFS server for persistent storage. The environment comprises one master node, two worker nodes, and a dedicated NFS server, with shared persistent storage wired up through custom PersistentVolume (PV), PersistentVolumeClaim (PVC), and StorageClass (SC) manifests. All associated infrastructure configuration files, Kubernetes manifests, and shell scripts are available in my GitHub repository (link below). The setup has been validated in a home lab environment and is intended to accelerate prototyping and experimentation with virtualized workloads on Kubernetes.

The manifests and scripts can be downloaded from the repository:

https://github.com/ranjeetbadhe/Quick-kubernetes-NFS-Cluster.git
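
Three manifests wire up the storage: a PersistentVolumeClaim, a statically defined PersistentVolume that points at the NFS export, and a StorageClass whose name ties the two together. They are listed below and then applied in StorageClass, PV, PVC order.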

[root@kubemaster nginx]# cat nfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-sc

[root@kubemaster nginx]# cat nfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /nfs
    server: 192.168.0.30
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-sc
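
The server and path in the PV spec (192.168.0.30:/nfs) must match the export configured on the NFS server; that configuration is covered in the "Setting up the NFS Server" section below.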

[root@kubemaster nginx]# cat nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-sc
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
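
Because the provisioner is kubernetes.io/no-provisioner, this class performs no dynamic provisioning. It simply acts as the label that binds claims requesting nfs-sc to the manually created PV, and volumeBindingMode: Immediate binds the claim as soon as it is created rather than waiting for a consuming pod.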
[root@kubemaster nginx]# kubectl apply -f nfs-storageclass.yaml
storageclass.storage.k8s.io/nfs-sc created
[root@kubemaster nginx]# kubectl get sc
NAME     PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-sc   kubernetes.io/no-provisioner   Delete          Immediate           false                  7s
[root@kubemaster nginx]# cat nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - nginx
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nfs-vol
              mountPath: /usr/share/nginx/html
      volumes:
        - name: nfs-vol
          persistentVolumeClaim:
            claimName: nfs-pvc
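
The podAntiAffinity rule keyed on kubernetes.io/hostname forces the two replicas onto different worker nodes, which the pod placement later confirms (one pod each on kubeworker1 and kubeworker2). Both replicas mount the same nfs-pvc claim, which works because the volume is ReadWriteMany.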
[root@kubemaster nginx]# kubectl apply -f nfs-pv.yaml
persistentvolume/nfs-pv created

[root@kubemaster nginx]# kubectl get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
nfs-pv   1Gi        RWX            Retain           Available           nfs-sc         <unset>                          8s

[root@kubemaster nginx]# kubectl apply -f nfs-pvc.yaml
persistentvolumeclaim/nfs-pvc created


[root@kubemaster nginx]# kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
nfs-pvc   Bound    nfs-pv   1Gi        RWX            nfs-sc         <unset>                 4s


[root@kubemaster nginx]# kubectl apply -f nginx-deployment.yaml
deployment.apps/nginx-deployment created
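
The nginx-service.yaml manifest is not reproduced in this session capture. A minimal sketch consistent with the NodePort output further down (service name nginx-service, port 80 published as nodePort 30080, selecting the app=nginx pods) would be:

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30080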


[root@kubemaster nginx]# kubectl apply -f nginx-service.yaml
service/nginx-service created

[root@kubemaster nginx]# kubectl get pods
NAME                                      READY   STATUS    RESTARTS       AGE
nfs-client-provisioner-76c8c74464-rknqt   1/1     Running   16 (17h ago)   138d
nginx-deployment-58f667675d-r25cq         1/1     Running   0              7m5s
nginx-deployment-58f667675d-w566s         1/1     Running   0              6m52s
virt-launcher-testvm-ltxpc                3/3     Running   0              21m

[root@kubemaster nginx]# kubectl get pods -l app=nginx
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-58f667675d-r25cq   1/1     Running   0          8m13s
nginx-deployment-58f667675d-w566s   1/1     Running   0          8m

[root@kubemaster nginx]# kubectl get svc nginx-service
NAME            TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
nginx-service   NodePort   10.98.1.43   <none>        80:30080/TCP   16m
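
With the service in place, the shared page is reachable on port 30080 of either worker node (192.168.0.41 and 192.168.0.42); both replicas return identical content because they serve the same NFS-backed document root: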

[root@kubemaster nginx]# curl http://192.168.0.41:30080
ranjeet badhe with shared storage
[root@kubemaster nginx]# curl http://192.168.0.42:30080
ranjeet badhe with shared storage


[root@kubemaster nginx]# kubectl get pods
NAME                                      READY   STATUS    RESTARTS       AGE
nfs-client-provisioner-76c8c74464-rknqt   1/1     Running   16 (17h ago)   138d
nginx-deployment-58f667675d-r25cq         1/1     Running   0              13m
nginx-deployment-58f667675d-w566s         1/1     Running   0              12m
virt-launcher-testvm-ltxpc                3/3     Running   0              27m

[root@kubemaster nginx]# kubectl describe pod nginx-deployment-58f667675d-r25cq
Name:             nginx-deployment-58f667675d-r25cq
Namespace:        default
Priority:         0
Service Account:  default
Node:             kubeworker2.ranjeetbadhe.com/192.168.0.42
Start Time:       Mon, 16 Jun 2025 19:52:27 +0530
Labels:           app=nginx
                  pod-template-hash=58f667675d
Annotations:      cni.projectcalico.org/containerID: 9efa0e9d03bce76376dad70ade3145ae3ca63218ffe26bb186bdb38d97430624
                  cni.projectcalico.org/podIP: 10.244.50.136/32
                  cni.projectcalico.org/podIPs: 10.244.50.136/32
Status:           Running
IP:               10.244.50.136
IPs:
  IP:           10.244.50.136
Controlled By:  ReplicaSet/nginx-deployment-58f667675d
Containers:
  nginx:
    Container ID:   containerd://045f48787897cd085d9b1483f2b7f3e7794423db68180e034e25c5d3a8d8ffc2
    Image:          nginx:latest
    Image ID:       docker.io/library/nginx@sha256:6784fb0834aa7dbbe12e3d7471e69c290df3e6ba810dc38b34ae33d3c1c05f7d
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Mon, 16 Jun 2025 19:58:55 +0530
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html from nfs-vol (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-m2xt7 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       True
  ContainersReady             True
  PodScheduled                True
Volumes:
  nfs-vol:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  nfs-pvc
    ReadOnly:   false
  kube-api-access-m2xt7:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:                      <none>
[root@kubemaster nginx]# kubectl describe pod nginx-deployment-58f667675d-w566s
Name:             nginx-deployment-58f667675d-w566s
Namespace:        default
Priority:         0
Service Account:  default
Node:             kubeworker1.ranjeetbadhe.com/192.168.0.41
Start Time:       Mon, 16 Jun 2025 19:52:40 +0530
Labels:           app=nginx
                  pod-template-hash=58f667675d
Annotations:      cni.projectcalico.org/containerID: f9a7ae8e899d5af7b3f27e73d188836f6474e42ee66c7eb775e3c6d15fe777ef
                  cni.projectcalico.org/podIP: 10.244.127.107/32
                  cni.projectcalico.org/podIPs: 10.244.127.107/32
Status:           Running
IP:               10.244.127.107
IPs:
  IP:           10.244.127.107
Controlled By:  ReplicaSet/nginx-deployment-58f667675d
Containers:
  nginx:
    Container ID:   containerd://ced3022b40bf1a4974f2babf10a098f5978ebc0b999308dfa67f341f34251d24
    Image:          nginx:latest
    Image ID:       docker.io/library/nginx@sha256:6784fb0834aa7dbbe12e3d7471e69c290df3e6ba810dc38b34ae33d3c1c05f7d
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Mon, 16 Jun 2025 19:59:06 +0530
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html from nfs-vol (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-k92cc (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       True
  ContainersReady             True
  PodScheduled                True
Volumes:
  nfs-vol:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  nfs-pvc
    ReadOnly:   false
  kube-api-access-k92cc:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
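
The test page below is written from a shell inside one of the nginx pods; because /usr/share/nginx/html is the shared NFS volume, the file shows up in both replicas at once. A sketch of opening that shell, using the pod name from the listing above:

[root@kubemaster nginx]# kubectl exec -it nginx-deployment-58f667675d-r25cq -- /bin/bash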

# echo "ranjeet badhe with shared storage" > /usr/share/nginx/html/index.html

# df -k
Filesystem            1K-blocks    Used Available Use% Mounted on
overlay                62675048 9818032  52857016  16% /
tmpfs                     65536       0     65536   0% /dev
/dev/mapper/rhel-root  62675048 9818032  52857016  16% /etc/hosts
shm                       65536       0     65536   0% /dev/shm
192.168.0.30:/nfs     209088512 9555968 199532544   5% /usr/share/nginx/html
tmpfs                  32500668      12  32500656   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                  16301532       0  16301532   0% /proc/asound
tmpfs                  16301532       0  16301532   0% /proc/acpi
tmpfs                  16301532       0  16301532   0% /proc/scsi
tmpfs                  16301532       0  16301532   0% /sys/firmware
#




Web Access from Browser (Worker nodes)

Setting up the NFS Server
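
The export host here is assumed to be a RHEL-family machine (the rhel-root filesystem in the df output suggests as much), so the NFS server packages and service would first be installed and enabled roughly like this:

[root@nfsweb ~]# dnf install -y nfs-utils
[root@nfsweb ~]# systemctl enable --now nfs-server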

[root@nfsweb ~]# cat /etc/exports

/nfs 192.168.0.0/24(rw,sync,no_root_squash,no_subtree_check)

[root@nfsweb ~]# mkdir -p /nfs
[root@nfsweb ~]# chmod 777 /nfs

[root@nfsweb ~]#  exportfs -ra
[root@nfsweb ~]# exportfs -v
/nfs            192.168.0.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)


[root@nfsweb ~]# systemctl status firewalld
[root@nfsweb ~]# systemctl stop firewalld
[root@nfsweb ~]# systemctl disable firewalld
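
Stopping firewalld outright is acceptable for a disposable lab. To keep the firewall running instead, opening the NFS-related services should suffice (a sketch):

firewall-cmd --permanent --add-service=nfs --add-service=rpc-bind --add-service=mountd
firewall-cmd --reload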

[root@nfsweb ~]# sestatus
SELinux status:                 disabled
[root@nfsweb ~]#

[root@kubeworker1 ~]# mount -t nfs 192.168.0.30:/nfs /mnt
[root@kubeworker1 ~]# df -h | grep nfs
df: /home/nfs: Stale file handle
192.168.0.30:/nfs      200G  9.2G  191G   5% /mnt

[root@kubeworker2 ~]# mount -t nfs 192.168.0.30:/nfs /mnt
[root@kubeworker2 ~]# df -h | grep nfs
192.168.0.30:/nfs      200G  9.2G  191G   5% /mnt
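
The "Stale file handle" warning on kubeworker1 is residue from an earlier mount at /home/nfs whose export has since changed; a lazy unmount (umount -l /home/nfs) clears it. The successful mounts confirm that both workers can reach the export directly, which is what the pods' NFS volumes depend on.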
