From 231f2cb2205988cf87062bc9f595307af1ed827f Mon Sep 17 00:00:00 2001 From: "Gabriel A. Giovanini" Date: Sun, 15 May 2022 15:34:36 +0200 Subject: feat: Add missing blog post Add the missing blog post from my hugo blog. Also add a locustfile so I can do some stress test locally. --- .gitignore | 1 + content/posts/2019-03-03Welcome_to_my_blog.html | 6 ++ ...ing_desktop_setup_with_ansible-pull_part-1.html | 94 +++++++++++++++++++ ...ing_desktop_setup_with_ansible-pull_part-2.html | 70 ++++++++++++++ ...Compiling_emacs_from_source_code_on_fedora.html | 31 +++++++ content/posts/2020-07-12Road_to_local_K8S.html | 101 +++++++++++++++++++++ .../posts/2020-07-14Friz_box_turned_off_DHCP.html | 12 ++- ...020-08-22Moving_from_Github_to_Gilab_pages.html | 38 ++++++++ content/posts/2021-12-26Enable_NFS_on_K3S.html | 37 ++++---- ...26K8S_private_gitlab_registry_using_podman.html | 38 -------- ...28K8S_private_gitlab_registry_using_podman.html | 38 ++++++++ contrib/locust/locustfile.py | 18 ++++ src/blog.rs | 13 ++- templates/post.html | 2 +- 14 files changed, 434 insertions(+), 65 deletions(-) create mode 100644 content/posts/2019-03-03Welcome_to_my_blog.html create mode 100644 content/posts/2019-03-07Automating_desktop_setup_with_ansible-pull_part-1.html create mode 100644 content/posts/2019-04-22Automating_desktop_setup_with_ansible-pull_part-2.html create mode 100644 content/posts/2019-11-16Compiling_emacs_from_source_code_on_fedora.html create mode 100644 content/posts/2020-07-12Road_to_local_K8S.html create mode 100644 content/posts/2020-08-22Moving_from_Github_to_Gilab_pages.html delete mode 100644 content/posts/2021-12-26K8S_private_gitlab_registry_using_podman.html create mode 100644 content/posts/2021-12-28K8S_private_gitlab_registry_using_podman.html create mode 100644 contrib/locust/locustfile.py diff --git a/.gitignore b/.gitignore index 96ef862..634077d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ target/ .idea/ +__pycache__/ diff --git a/content/posts/2019-03-03Welcome_to_my_blog.html b/content/posts/2019-03-03Welcome_to_my_blog.html new file mode 100644 index 0000000..10b1f05 --- /dev/null +++ b/content/posts/2019-03-03Welcome_to_my_blog.html @@ -0,0 +1,6 @@ +
+

+ On this blog, I'll be posting some personal projects that I'm working on + or just logging stuff that I don't want to forget. +

+
diff --git a/content/posts/2019-03-07Automating_desktop_setup_with_ansible-pull_part-1.html b/content/posts/2019-03-07Automating_desktop_setup_with_ansible-pull_part-1.html new file mode 100644 index 0000000..d06a648 --- /dev/null +++ b/content/posts/2019-03-07Automating_desktop_setup_with_ansible-pull_part-1.html @@ -0,0 +1,94 @@ +
+

+ Every time that I do a clean install on my machine it takes a few hours till I get to the point where I was before formatting it: installing all packages, selecting themes, icons and fonts, installing IDEs, extensions and so on. After doing it a few times I came to the conclusion that I would save time by spending time automating this chore, and as a result I could tinker a little more with my system and not worry about spending a weekend re-installing everything (which has happened more times than I'd like to remember).

+

+ So after a few attempts using Python and Bash I ended up with many files, and keeping everything organized and concise turned out to be more tedious than the setup itself. So here comes Ansible. It is enterprise-grade software used to automate tasks. It has A LOT OF features and it can be really helpful if you're a sysadmin, but for now we're going to focus on Ansible Pull and Playbooks. As better described:

+ [Ansible-Pull] is used to up a remote copy of ansible on each managed + node, each set to run via cron and update playbook source via a source + repository. This inverts the default push architecture of Ansible into a + pull architecture, which has near-limitless scaling potential. + + Playbooks are Ansible’s configuration, deployment, and orchestration + language. They can describe a policy you want your remote systems to + enforce, or a set of steps in a general IT process. + (source) +
+

+

+ The goal is to pull and run a playbook remotely using a git repository. The playbook will describe the tasks needed to set up our machine from scratch.
+ But first let's tinker a bit with playbooks locally with ansible-playbook. To do so we need to add localhost to Ansible's hosts list. Add it to /etc/ansible/hosts:

[all]
+localhost
+

+

+ As an experiment we're going to write a task to install vim. Currently I'm using Fedora, thus we're going to use the dnf module to install packages, but if you're using another distribution look for an equivalent module, like the apt module for Ubuntu. The playbook to install it is quite simple:

# main.yml
+- hosts: all
+  tasks:
+     - name: install vim
+       dnf:
+         name: vim
+         state: latest
+
+
hosts
+
it is required and it has to match our hosts, otherwise the playbook won't run.
+
tasks
+
+ it is the list of tasks that the playbook will perform, in this case installing vim with dnf.
+
+

+

+ To run a playbook use the ansible-playbook command. To run main.yml directly from disk, just run the following command:

sudo ansible-playbook --connection=local main.yml
+

+

+ After a few seconds, vim will be installed on your machine. +

PLAY [all] *************************************************************
+
+TASK [Gathering Facts] *************************************************
+ok: [localhost]
+
+TASK [install vim] *****************************************************
+ok: [localhost]
+
+PLAY RECAP *************************************************************
+localhost                  : ok=2    changed=0    unreachable=0    failed=0
+
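+ As a small extension (a sketch, not taken from the post), the same dnf task can install several packages at once by passing a list, which keeps the playbook short as it grows:
+# main.yml -- hypothetical variant; the package list is only illustrative
+- hosts: all
+  tasks:
+    - name: install base packages
+      dnf:
+        name:
+          - vim
+          - git
+          - htop
+        state: latest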

+

+ This is the first step. In the next part we shall create a more complex playbook and set up a repository to run it remotely using ansible-pull.

+
diff --git a/content/posts/2019-04-22Automating_desktop_setup_with_ansible-pull_part-2.html b/content/posts/2019-04-22Automating_desktop_setup_with_ansible-pull_part-2.html new file mode 100644 index 0000000..707ba7d --- /dev/null +++ b/content/posts/2019-04-22Automating_desktop_setup_with_ansible-pull_part-2.html @@ -0,0 +1,70 @@ +
+ See part 1 +

+ Now we're going to set up Ansible to work with a git repository. The process is quite similar to ansible-playbook; the only difference is that the source for the playbook will be a remote repository and not a local file. Following the previous example, we'll automate the vim setup.

+

+ Create a git repository wherever you see fit, GitLab and GitHub offer free repositories. For this task we need to add only two files: the yml file describing the tasks and the .vimrc file.

+

+ In the .vimrc add your own configuration. You can see mine over here; it is pretty simple, as I only use vim for simple text editing (like this post), so you can start with that if you don't have one.

+

+ The yml file will have two tasks. The first one installs vim, just like we did in part 1.

# main.yml
+---
+- name: install vim
+  dnf:
+    name: vim
+    state: latest
+

+

+ To copy the .vimrc file to your $HOME we're going to use the copy module:
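+ A minimal sketch of that copy task (my assumption: the .vimrc sits at the root of the repository; adjust src and dest to your layout):
+# main.yml -- hypothetical copy task
+- name: copy vimrc
+  copy:
+    src: .vimrc
+    dest: "{{ ansible_env.HOME }}/.vimrc"   # ansible_env requires gathered facts
+    mode: "0644"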

+

+ After we've added those two files to the repository, you will have something like this.
+ Params: -U sets the URL of the playbook repository, -C the branch, tag or commit to check out, and -i the inventory to use.

+

+

+ Remember man is your best friend, take a look at man ansible-pull to know + more about its parameters. +

+

+ The best part is that you can quickly test and see the result by running my sample:

ansible-pull \
+    -U https://gitlab.com/gabrielgio/homestation.git \
+    -C debcf3458df511aef9f7dca0cb73f6cf6baddd5d \
+    -i all \
+    main.yml
+

+

+ The idea here is to keep your repository as the source of truth when it comes to configuration. You can add ansible-pull to a cron tab, so you just need to push something to your repository and after a few minutes not only your machine but all the machines that have it set up will run the playbooks. You can use this method as a simple way to install software, update machines or even distribute tooling company-wide.

+
diff --git a/content/posts/2019-11-16Compiling_emacs_from_source_code_on_fedora.html b/content/posts/2019-11-16Compiling_emacs_from_source_code_on_fedora.html new file mode 100644 index 0000000..f47040f --- /dev/null +++ b/content/posts/2019-11-16Compiling_emacs_from_source_code_on_fedora.html @@ -0,0 +1,31 @@ +
+

+ Compiling Emacs from source and installing it on Fedora.

+

Installing packages

+

+ Install the following packages: +

sudo dnf install git autoconf make gcc texinfo \
+    gnutls-devel giflib-devel ncurses-devel \
+    libjpeg-turbo-devel giflib-devel gtk3-devel \
+    libXpm-devel libtiff-devel libxml2-devel -y
+

+

Cloning Repository

+

+ Clone the repository from savannah.gnu.org:

git clone -b master git://git.sv.gnu.org/emacs.git
+

+

Compiling

+

+ Navigate to the emacs folder with cd emacs and execute the following steps:

./autogen.sh
+./configure
+make -j$(nproc)
+sudo make install
+
+ Afterwards, verify the version with ./emacs --version; it should be equal to or higher than 28.0.50.

+
diff --git a/content/posts/2020-07-12Road_to_local_K8S.html b/content/posts/2020-07-12Road_to_local_K8S.html new file mode 100644 index 0000000..5d34b27 --- /dev/null +++ b/content/posts/2020-07-12Road_to_local_K8S.html @@ -0,0 +1,101 @@ +
+

Goal

+

+ The goal is to deploy Kubernetes on my local network, and keep everything as reproducible as possible.

+

Stack

+

+ I'll use Fedora CoreOS, Matchbox and Terraform 1, to match the requirements for Tectonic 2.

+

Steps

+ +

Network Setup DHCP/TFTP/DNS

+

First learning the basics

+ +

+ To check open ports +

lsof -Pni | grep LISTEN
+

+

+ Run the provided6 image with dnsmasq and the PXE toolkit:

docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
+  -d -q \
+  --dhcp-range=192.168.1.3,192.168.1.254 \
+  --enable-tftp --tftp-root=/var/lib/tftpboot \
+  --dhcp-match=set:bios,option:client-arch,0 \
+  --dhcp-boot=tag:bios,undionly.kpxe \
+  --dhcp-match=set:efi32,option:client-arch,6 \
+  --dhcp-boot=tag:efi32,ipxe.efi \
+  --dhcp-match=set:efibc,option:client-arch,7 \
+  --dhcp-boot=tag:efibc,ipxe.efi \
+  --dhcp-match=set:efi64,option:client-arch,9 \
+  --dhcp-boot=tag:efi64,ipxe.efi \
+  --dhcp-userclass=set:ipxe,iPXE \
+  --dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
+  --address=/matchbox.example/192.168.1.2 \
+  --log-queries \
+  --log-dhcp
+

+

Matchbox

+

...

+

PXE network boot environment

+

...

+

Terraform Tectonic

+

...

+

Links

+
+ 1 + + https://coreos.com/tectonic/docs/latest/install/bare-metal/metal-terraform.html + +
+
+ 2 + + https://coreos.com/tectonic/docs/latest/install/bare-metal/requirements.html + +
diff --git a/content/posts/2020-07-14Friz_box_turned_off_DHCP.html b/content/posts/2020-07-14Friz_box_turned_off_DHCP.html index 7eb69ef..3ee5daf 100644 --- a/content/posts/2020-07-14Friz_box_turned_off_DHCP.html +++ b/content/posts/2020-07-14Friz_box_turned_off_DHCP.html @@ -1,9 +1,11 @@

- If you turned off your DHCP server follow these steps to connect to FritzBox settings. -
+ If you turned off your DHCP server follow these steps to connect to + FritzBox settings.

@@ -11,7 +13,7 @@ On gnome turn the wired connection off on again to apply the settings.

- Note: why in the hell does FritzBox 7490 require a land-line telephone to be physically factory - reset? + Note: why in the hell does FritzBox 7490 require a + land-line telephone to be physically factory reset?

diff --git a/content/posts/2020-08-22Moving_from_Github_to_Gilab_pages.html b/content/posts/2020-08-22Moving_from_Github_to_Gilab_pages.html new file mode 100644 index 0000000..5fb1d78 --- /dev/null +++ b/content/posts/2020-08-22Moving_from_Github_to_Gilab_pages.html @@ -0,0 +1,38 @@ +
+

+ This was quite simple: I just had to create a simple GitLab pipeline job and publish to pages. This is done by:

+
image: clojure:lein-2.7.0
+
+before_script:
+  - lein deps
+
+test:
+  script:
+    - lein test
+
+pages:
+  stage: deploy
+  script:
+    - lein package
+  artifacts:
+    paths:
+      - public
+  only:
+    - master
+
+
before_script
+
will download all the dependencies with lein deps.
+
test
+
it is self-explanatory.
+
pages
+
+ it will compile the cljs into js with lein package into the public folder, to later be published to GitLab Pages. Take a look at the artifacts property; it is used to say which paths will be collected.
+
+ +
diff --git a/content/posts/2021-12-26Enable_NFS_on_K3S.html b/content/posts/2021-12-26Enable_NFS_on_K3S.html index 09f91e7..22ddf33 100644 --- a/content/posts/2021-12-26Enable_NFS_on_K3S.html +++ b/content/posts/2021-12-26Enable_NFS_on_K3S.html @@ -1,10 +1,10 @@

- By default K3S comes only with local-path storage class, and if you are - running - with more than one node in your cluster you may want to use a more “distributed” - solution. For may case I opted for NFS. + By default K3S comes only with local-path storage class, and if you are running with more than one node in your cluster you may want to use a more “distributed” solution. For my case I opted for NFS.

To check the current storage class you can run: @@ -16,21 +16,22 @@

NAME                   PROVISIONER                                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
 local-path (default)   rancher.io/local-path                           Delete          WaitForFirstConsumer   false                  154d

- To start adding First you need to install helm on your server. To do - so you may - run: + To start, first you need to install helm on your server. To do so you may run:

curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash

- Be careful when running scripts directly into bash always check the source - Sometimes is also recommended to do not pipe directly to bash + Be careful when running scripts directly in bash: always check the source. Sometimes it is also recommended not to pipe directly into bash.

- Once it is installed we need to add the NFS storage classes. It has two - providers, I have chose NFS Subdir - External Provisioner. + Once it is installed we need to add the NFS storage classes. It has two providers; I have chosen the NFS Subdir External Provisioner.

Add the helm repo @@ -46,12 +47,12 @@ local-path (default) rancher.io/local-path Delete --set nfs.path=/exported/path

- Set the nfs.server and nfs.path accordingly with your setup. + Set the nfs.server and nfs.path according to your setup.

-

- After that if we run k3s kubectl get storageclasses it will now print another - NFS provider: + After that if we run k3s kubectl get storageclasses it will + now print another NFS provider:

NAME                   PROVISIONER                                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
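+ As a usage sketch (not part of the original post), a PersistentVolumeClaim can then request storage from the new class; nfs-client is the chart's default storage class name and may differ in your setup:
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-claim
+spec:
+  storageClassName: nfs-client   # default name used by the NFS Subdir External Provisioner chart
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi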
diff --git a/content/posts/2021-12-26K8S_private_gitlab_registry_using_podman.html b/content/posts/2021-12-26K8S_private_gitlab_registry_using_podman.html
deleted file mode 100644
index 470965c..0000000
--- a/content/posts/2021-12-26K8S_private_gitlab_registry_using_podman.html
+++ /dev/null
@@ -1,38 +0,0 @@
-
-

- This is based on Log in to - Docker Hub. It is just a bit different to use podman. -

-

- First we should take a look at podman-login man page: -

man podman login
-

-

- It will give some valueable information like the location of auth.json file. Now we can login using podman: -

podman login registry.gitlab.com
-

-

Then check the auth.json file located at ${XDG_RUNTIME_DIR}/containers/auth.json (as described - by the manual). It will contain your auth config: -

{
-	"auths": {
-		"registry.gitlab.com": {
-			"auth": "..."
-		}
-	}
-}
-

-

- Now copy that file over to the server and register it in k8s with the following command: -


-kubectl create secret generic regcred \
-    --from-file=.dockerconfigjson=auth.json \
-    --type=kubernetes.io/dockerconfigjson
-

-

- Once you have created you can list by kubectl get secret: -


-NAME     TYPE                                  DATA   AGE
-regcred  kubernetes.io/dockerconfigjson        1      53s
-

-
diff --git a/content/posts/2021-12-28K8S_private_gitlab_registry_using_podman.html b/content/posts/2021-12-28K8S_private_gitlab_registry_using_podman.html new file mode 100644 index 0000000..470965c --- /dev/null +++ b/content/posts/2021-12-28K8S_private_gitlab_registry_using_podman.html @@ -0,0 +1,38 @@ +
+

+ This is based on Log in to Docker Hub; it is just a bit different when using podman.

+

+ First we should take a look at the podman-login man page:

man podman login
+

+

+ It will give some valuable information, like the location of the auth.json file. Now we can log in using podman:

podman login registry.gitlab.com
+

+

Then check the auth.json file located at ${XDG_RUNTIME_DIR}/containers/auth.json (as described + by the manual). It will contain your auth config: +

{
+	"auths": {
+		"registry.gitlab.com": {
+			"auth": "..."
+		}
+	}
+}
+

+

+ Now copy that file over to the server and register it in k8s with the following command: +


+kubectl create secret generic regcred \
+    --from-file=.dockerconfigjson=auth.json \
+    --type=kubernetes.io/dockerconfigjson
+

+

+ Once you have created it, you can list it with kubectl get secret:


+NAME     TYPE                                  DATA   AGE
+regcred  kubernetes.io/dockerconfigjson        1      53s
+
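+ To actually pull from the private registry, the secret can be referenced from a pod spec via imagePullSecrets; a minimal sketch, with a placeholder image path:
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example
+spec:
+  imagePullSecrets:
+    - name: regcred
+  containers:
+    - name: app
+      image: registry.gitlab.com/your-group/your-project/app:latest   # placeholder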

+
diff --git a/contrib/locust/locustfile.py b/contrib/locust/locustfile.py new file mode 100644 index 0000000..ca7f8f3 --- /dev/null +++ b/contrib/locust/locustfile.py @@ -0,0 +1,18 @@ +from locust import HttpUser, task + +class HelloWorldUser(HttpUser): + @task + def index(self): + self.client.get("/") + + @task + def posts(self): + self.client.get("/posts/2021-12-28K8S_private_gitlab_registry_using_podman.html") + self.client.get("/posts/2021-12-26Enable_NFS_on_K3S.html") + self.client.get("/posts/2020-08-22Moving_from_Github_to_Gilab_pages.html") + self.client.get("/posts/2020-07-14Friz_box_turned_off_DHCP.html") + self.client.get("/posts/2020-07-12Road_to_local_K8S.html") + self.client.get("/posts/2019-11-16Compiling_emacs_from_source_code_on_fedora.html") + self.client.get("/posts/2019-04-22Automating_desktop_setup_with_ansible-pull_part-2.html") + self.client.get("/posts/2019-03-07Automating_desktop_setup_with_ansible-pull_part-1.html") + self.client.get("/posts/2019-03-03Welcome_to_my_blog.html") diff --git a/src/blog.rs b/src/blog.rs index e549fb2..6bbda49 100644 --- a/src/blog.rs +++ b/src/blog.rs @@ -3,8 +3,9 @@ use sailfish::TemplateOnce; use chrono::NaiveDate; use regex::{Regex}; use std::str; +use std::cmp::{PartialOrd, Ord, PartialEq, Eq}; -const BLOG_REGEX: &str = r"(?P[\d]{4}-[\d]{2}-[\d]{2})(?P[a-zA-Z0-9_]*)"; +const BLOG_REGEX: &str = r"(?P<date>[\d]{4}-[\d]{2}-[\d]{2})(?P<title>[a-zA-Z0-9-_]*)"; #[derive(RustEmbed)] #[folder = "content/posts/"] @@ -25,6 +26,7 @@ struct PostTemplate { date: String } +#[derive(PartialEq, Eq, PartialOrd, Ord)] pub struct BlogEntry { pub title: String, pub datetime: NaiveDate, @@ -46,10 +48,15 @@ impl BlogEntry { } pub fn read_assets() -> Vec<BlogEntry> { - PostAsset::iter() + + let mut entries: Vec<BlogEntry> = PostAsset::iter() .map(|e| format!("{}", e)) .map(|e| BlogEntry::new(&e)) - .collect() + .collect(); + + entries.sort_by(|a, b| b.datetime.cmp(&a.datetime)); + + entries } } diff --git a/templates/post.html b/templates/post.html index 4e5cf9a..7e0a909 100644 --- a/templates/post.html +++ b/templates/post.html @@ -7,7 +7,7 @@ <% include!("header.html"); %> <main class="container"> <h2><%- title %></h2> - <h5>created at: <%- date %></h2> + <h5>Created At: <%- date %></h2> <%- content %> </section> </main> -- cgit v1.2.3