From e964ec8f74b644d66ca166a7524adcc3a82709c9 Mon Sep 17 00:00:00 2001
From: "Gabriel A. Giovanini"
+ Fritz.box turned off DHCP
+
+ If you turned off your DHCP server, follow these steps to connect to the FritzBox settings.
+
+ Give your machine a static IPv4 address in the box's subnet, for example
+ 192.168.178.2/24 (by default the FritzBox itself sits at 192.168.178.1), and
+ then open http://192.168.178.1 directly in the browser; the fritz.box name
+ will not resolve while DHCP/DNS is off.
+
+ On GNOME, turn the wired connection off and on again to apply the settings.
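+
+ If you use NetworkManager, here is a minimal sketch of the same steps from
+ the command line. The connection name "Wired connection 1" and the addresses
+ are assumptions; adjust them to your setup:
+
+# list your connection names first: nmcli con show
+nmcli con mod "Wired connection 1" \
+    ipv4.method manual \
+    ipv4.addresses 192.168.178.2/24 \
+    ipv4.gateway 192.168.178.1
+
+# turn the connection off and on again to apply the settings
+nmcli con down "Wired connection 1" && nmcli con up "Wired connection 1"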
+ Note: why in the hell does the FritzBox 7490 require a landline telephone to be physically factory reset?
+
diff --git a/content/posts/2021-12-26Enable_NFS_on_K3S.html b/content/posts/2021-12-26Enable_NFS_on_K3S.html
new file mode 100644
index 0000000..09f91e7
--- /dev/null
+++ b/content/posts/2021-12-26Enable_NFS_on_K3S.html
@@ -0,0 +1,60 @@
+ By default K3S comes only with the local-path storage class, and if you are
+ running with more than one node in your cluster you may want to use a more
+ “distributed” solution. In my case I opted for NFS.
+ To check the current storage classes you can run:
+k3s kubectl get storageclasses
+ And it will print something like:
+NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 154d
+ To start adding NFS storage, first you need to install Helm on your server.
+ To do so you may run:
+curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+ Be careful when piping scripts directly into bash: always check the source
+ first. It is also often recommended not to pipe into bash at all.
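+
+ A more careful route, as a sketch: download the script, read it, and only
+ then execute it.
+
+curl -fsSL -o get-helm-3.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+less get-helm-3.sh   # inspect before running
+chmod +x get-helm-3.sh
+./get-helm-3.sh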
+ Once it is installed we need to add the NFS storage class. There are two
+ providers for NFS; I chose the NFS Subdir External Provisioner.
+ Add the helm repo:
+helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
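+
+ It may also be worth refreshing the local chart index before installing:
+
+helm repo update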
+ Then we need to actually install the provisioner:
+helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
+  --set nfs.server=x.x.x.x \
+  --set nfs.path=/exported/path
+
+ Set the nfs.server and nfs.path values according to your setup.
+
+ After that, if we run k3s kubectl get storageclasses again, it will now also
+ print the NFS storage class:
+
+NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 154d
+nfs-client cluster.local/nfs-subdir-external-provisioner Delete Immediate true 76m
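+
+ As a quick usage sketch, a PersistentVolumeClaim can now request the
+ nfs-client class shown above. The claim name and size here are arbitrary
+ examples:
+
+k3s kubectl apply -f - <<EOF
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-nfs-claim   # arbitrary example name
+spec:
+  storageClassName: nfs-client
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+EOF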
+ This is based on Log in to Docker Hub. It is just a bit different using podman.
+ First we should take a look at the podman-login man page:
+man podman-login
+
+ It will give some valuable information, like the location of the auth.json
+ file. Now we can log in using podman:
+podman login registry.gitlab.com
+
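+ To double-check that the login was stored, podman can print the logged-in
+ user for a registry:
+
+podman login --get-login registry.gitlab.com
+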
+ Then check the auth.json file located at ${XDG_RUNTIME_DIR}/containers/auth.json
+ (as described by the manual). It will contain your auth config:
+
+{
+  "auths": {
+    "registry.gitlab.com": {
+      "auth": "..."
+    }
+  }
+}
+
+ Now copy that file over to the server and register it in k8s with the
+ following command:
+kubectl create secret generic regcred \
+ --from-file=.dockerconfigjson=auth.json \
+ --type=kubernetes.io/dockerconfigjson
+
+ Once you have created it, you can list it with kubectl get secret:
+NAME TYPE DATA AGE
+regcred kubernetes.io/dockerconfigjson 1 53s
+
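+ A minimal sketch of actually using the secret from a pod; the image path is
+ a placeholder for a private image on registry.gitlab.com:
+
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: private-image-test   # arbitrary example name
+spec:
+  containers:
+    - name: app
+      image: registry.gitlab.com/youruser/yourimage:latest  # placeholder
+  imagePullSecrets:
+    - name: regcred
+EOF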