Automating Kubernetes Installation with Ansible

In today's world of containerized applications, Kubernetes has emerged as the de facto standard for container orchestration. Deploying and managing Kubernetes clusters manually can be complex and time-consuming. Thankfully, automation tools like Ansible provide a powerful way to automate the installation and configuration of Kubernetes clusters.

In this blog post, we'll explore how to use Ansible to automate the installation of a Kubernetes cluster. We'll walk through the steps involved in setting up Ansible, creating playbooks, and deploying Kubernetes on a set of target servers.

SETUP

Ansible Node: ansible.mylabserver.com

Master Node: master.mylabserver.com

Worker Nodes: worker1.mylabserver.com, worker2.mylabserver.com

Prerequisites

1) Ansible must be installed on the Ansible (control) node; the managed nodes only need Python and SSH access

2) Set up passwordless SSH authentication from the Ansible node to the master and worker nodes
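
A minimal sketch of step 2, run from the Ansible node (assumes root SSH access; hostnames as in the setup above):

ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa
for host in master.mylabserver.com worker1.mylabserver.com worker2.mylabserver.com; do
  ssh-copy-id root@"$host"
done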

Let's Create the Playbooks

Create ansible.cfg

root@06432e7f921c:~#mkdir k8s-install
root@06432e7f921c:~#cd k8s-install
root@06432e7f921c:~/k8s-install#cat ansible.cfg
[defaults]
inventory=./inventory
deprecation_warnings=False

Create Inventory File

root@06432e7f921c:~/k8s-install# cat inventory
[master]
master.mylabserver.com

[worker]
worker1.mylabserver.com
worker2.mylabserver.com

Verify the nodes are accessible from the Ansible node

root@06432e7f921c:~/k8s-install# ansible -i inventory -m ping all
worker1.mylabserver.com | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}
worker2.mylabserver.com | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}
master.mylabserver.com | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python"
    },
    "changed": false,
    "ping": "pong"
}

Create the main.yaml file

root@06432e7f921c:~/k8s-install# cat main.yaml
---
- hosts: [master,worker]
  gather_facts: false
  roles:
     - role: k8s-config
       tags: common

- hosts: [master]
  gather_facts: false
  roles:
   - role: master-config
     tags: master

- hosts: [worker]
  gather_facts: false
  roles:
    - role: worker-config
      tags: worker

Create the Roles

root@06432e7f921c:~/k8s-install# ansible-galaxy init k8s-config
root@06432e7f921c:~/k8s-install# ansible-galaxy init master-config
root@06432e7f921c:~/k8s-install# ansible-galaxy init worker-config
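
Each ansible-galaxy init call scaffolds the standard role skeleton (shown here for k8s-config); the only files edited in this post are each role's tasks/main.yml:

k8s-config/
├── README.md
├── defaults/main.yml
├── files/
├── handlers/main.yml
├── meta/main.yml
├── tasks/main.yml
├── templates/
├── tests/
└── vars/main.yml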

Create Playbook for Common Installation

root@06432e7f921c:~/k8s-install# cat roles/k8s-config/tasks/main.yml
---
- name: Disable swap
  command: swapoff -a
  ignore_errors: yes

- name: Remove swap entry from /etc/fstab
  lineinfile:
    path: /etc/fstab
    state: absent
    regexp: '^.*swap.*$'

- name: Stop and disable ufw service
  service:
    name: ufw
    state: stopped
    enabled: no

- name: Ensure overlay and br_netfilter are present in containerd.conf
  lineinfile:
    path: /etc/modules-load.d/containerd.conf
    line: "{{ item }}"
    create: yes
  loop:
    - overlay
    - br_netfilter

- name: Load Kernel modules
  modprobe:
     name: "{{ item }}"
  loop:
     - overlay
     - br_netfilter

- name: Ensure kernel settings are present in kubernetes.conf
  lineinfile:
    path: /etc/sysctl.d/kubernetes.conf
    line: "{{ item }}"
    create: yes
  loop:
    - "net.bridge.bridge-nf-call-ip6tables = 1"
    - "net.bridge.bridge-nf-call-iptables = 1"
    - "net.ipv4.ip_forward = 1"

- name: Apply kernel settings
  command: sysctl --system

# Note: an export in a transient shell does not persist to later tasks; the
# environment keyword on the apt tasks is the reliable alternative.
- name: Set DEBIAN_FRONTEND environment variable
  shell:
    cmd: export DEBIAN_FRONTEND=noninteractive

# Legacy key for the old apt.kubernetes.io repository; the current
# pkgs.k8s.io signing key is downloaded further below.
- name: Add Kubernetes GPG key
  apt_key:
     url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg"
     state: present

- name: Update apt repositories
  apt:
   update_cache: yes

- name: Install required packages
  apt:
     name: "{{ item }}"
     state: present
  loop:
    - apt-transport-https
    - ca-certificates
    - curl
    - gnupg
    - lsb-release

- name: Ensure /etc/apt/keyrings directory exists
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'

- name: Check if Docker repository file exists
  stat:
    path: /etc/apt/sources.list.d/docker.list
  register: docker_repo_file

- name: Add Docker repository using echo
  shell:
    cmd: 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null'
  when: docker_repo_file.stat.exists == false
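
# Assumed addition (not part of the original run): the repo entry above
# references /etc/apt/keyrings/docker.gpg; if that key is not already on the
# hosts, this task downloads it.
- name: Download Docker GPG key
  shell:
    cmd: curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes
    creates: /etc/apt/keyrings/docker.gpg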

- name: Run apt update
  apt:
    update_cache: yes


- name: Install containerd.io package
  apt:
    name: containerd.io
    state: present


- name: Generate default containerd configuration
  shell:    # shell (not command) is required for the > redirection
     cmd: containerd config default > /etc/containerd/config.toml

- name: Set SystemdCgroup to true in containerd configuration
  replace:
     path: /etc/containerd/config.toml
     regexp: 'SystemdCgroup\s*=\s*false'
     replace: 'SystemdCgroup = true'

- name: Restart containerd service
  service:
    name: containerd
    state: restarted

- name: Enable containerd service
  service:
     name: containerd
     enabled: yes

- name: Download Kubernetes GPG key
  shell:
    cmd: curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg --yes

- name: Add Kubernetes repository
  copy:
     content: |
        deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /
     dest: /etc/apt/sources.list.d/kubernetes.list

- name: Update apt repositories
  apt:
    update_cache: yes

- name: Install Kubernetes components
  apt:
    name: "{{ item }}"
    state: present
  loop:
      - kubeadm
      - kubelet
      - kubectl

Create Playbook to Create Cluster

root@06432e7f921c:~/k8s-install# cat roles/master-config/tasks/main.yml
---
# kubeadm init writes /etc/kubernetes/admin.conf, so use that file as the
# idempotency guard.
- name: Initialize Kubernetes Cluster
  shell:
     cmd: "kubeadm init --apiserver-advertise-address=172.31.120.93 --pod-network-cidr=192.168.0.0/16 >> /root/kubeinit.log 2>/dev/null"
     creates: /etc/kubernetes/admin.conf

# Guard on a stamp file: guarding on admin.conf would always skip this task,
# since kubeadm init has just created that file.
- name: Deploy Calico network
  shell:
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/tigera-operator.yaml && kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/custom-resources.yaml && touch /root/calico-deployed"
    creates: /root/calico-deployed

- name: Generate and save cluster join command to /joincluster.sh
  shell:
    cmd: "kubeadm token create --print-join-command > /joincluster.sh"
    creates: /joincluster.sh
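
One caveat: the copy module used in the worker role below reads src from the Ansible control node, while /joincluster.sh is generated on the master. If the control node and master are separate machines, a fetch task like this (an assumed addition to the master role, not part of the original run) brings the script back to the control node first:

- name: Fetch the join script to the Ansible control node
  fetch:
    src: /joincluster.sh
    dest: /joincluster.sh
    flat: yes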

Create Playbook for the Worker Nodes

root@06432e7f921c:~/k8s-install# cat roles/worker-config/tasks/main.yml
---
- name: Copy joincluster script to worker nodes
  copy:
    src: /joincluster.sh
    dest: /joincluster.sh
    mode: 0755

- name: Execute joincluster.sh script
  shell:
     cmd: /joincluster.sh
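
For reference, the generated /joincluster.sh contains a single kubeadm join command of roughly this shape (token and CA hash elided):

kubeadm join 172.31.120.93:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>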

Install the common packages on all three nodes

root@06432e7f921c:~/k8s-install# ansible-playbook main.yaml --tags common

PLAY [master,worker] ************************************************************************************************************************************************************************

TASK [k8s-config : Disable swap] ************************************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Remove swap entry from /etc/fstab] ***************************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Stop and disable ufw service] ********************************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Ensure overlay and br_netfilter are present in containerd.conf] **********************************************************************************************************
ok: [worker1.mylabserver.com] => (item=overlay)
ok: [master.mylabserver.com] => (item=overlay)
ok: [worker2.mylabserver.com] => (item=overlay)
ok: [worker1.mylabserver.com] => (item=br_netfilter)
ok: [master.mylabserver.com] => (item=br_netfilter)
ok: [worker2.mylabserver.com] => (item=br_netfilter)

TASK [k8s-config : Load Kernel modules] *****************************************************************************************************************************************************
ok: [worker1.mylabserver.com] => (item=overlay)
ok: [master.mylabserver.com] => (item=overlay)
ok: [worker2.mylabserver.com] => (item=overlay)
ok: [worker1.mylabserver.com] => (item=br_netfilter)
ok: [master.mylabserver.com] => (item=br_netfilter)
ok: [worker2.mylabserver.com] => (item=br_netfilter)

TASK [k8s-config : Ensure kernel settings are present in kubernetes.conf] *******************************************************************************************************************
ok: [worker1.mylabserver.com] => (item=net.bridge.bridge-nf-call-ip6tables = 1)
ok: [worker2.mylabserver.com] => (item=net.bridge.bridge-nf-call-ip6tables = 1)
ok: [master.mylabserver.com] => (item=net.bridge.bridge-nf-call-ip6tables = 1)
ok: [worker1.mylabserver.com] => (item=net.bridge.bridge-nf-call-iptables = 1)
ok: [worker2.mylabserver.com] => (item=net.bridge.bridge-nf-call-iptables = 1)
ok: [master.mylabserver.com] => (item=net.bridge.bridge-nf-call-iptables = 1)
ok: [worker1.mylabserver.com] => (item=net.ipv4.ip_forward = 1)
ok: [worker2.mylabserver.com] => (item=net.ipv4.ip_forward = 1)
ok: [master.mylabserver.com] => (item=net.ipv4.ip_forward = 1)

TASK [k8s-config : Apply kernel settings] ***************************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Set DEBIAN_FRONTEND environment variable] ********************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Add Kubernetes GPG key] **************************************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Update apt repositories] *************************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Install required packages] ***********************************************************************************************************************************************
ok: [worker1.mylabserver.com] => (item=apt-transport-https)
ok: [master.mylabserver.com] => (item=apt-transport-https)
ok: [worker2.mylabserver.com] => (item=apt-transport-https)
ok: [worker1.mylabserver.com] => (item=ca-certificates)
ok: [master.mylabserver.com] => (item=ca-certificates)
ok: [worker2.mylabserver.com] => (item=ca-certificates)
ok: [worker1.mylabserver.com] => (item=curl)
ok: [master.mylabserver.com] => (item=curl)
ok: [worker1.mylabserver.com] => (item=gnupg)
ok: [worker2.mylabserver.com] => (item=curl)
ok: [master.mylabserver.com] => (item=gnupg)
ok: [worker1.mylabserver.com] => (item=lsb-release)
ok: [worker2.mylabserver.com] => (item=gnupg)
ok: [master.mylabserver.com] => (item=lsb-release)
ok: [worker2.mylabserver.com] => (item=lsb-release)

TASK [k8s-config : Ensure /etc/apt/keyrings directory exists] *******************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [worker2.mylabserver.com]
ok: [master.mylabserver.com]

TASK [k8s-config : Check if Docker repository file exists] **********************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Add Docker repository using echo] ****************************************************************************************************************************************
skipping: [master.mylabserver.com]
skipping: [worker2.mylabserver.com]
skipping: [worker1.mylabserver.com]

TASK [k8s-config : Run apt update] **********************************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [worker2.mylabserver.com]
changed: [master.mylabserver.com]

TASK [k8s-config : Install containerd.io package] *******************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [worker2.mylabserver.com]
changed: [master.mylabserver.com]

TASK [k8s-config : Generate default containerd configuration] *******************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Set SystemdCgroup to true in containerd configuration] *******************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [worker2.mylabserver.com]
ok: [master.mylabserver.com]

TASK [k8s-config : Restart containerd service] **********************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]

TASK [k8s-config : Enable containerd service] ***********************************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Download Kubernetes GPG key] *********************************************************************************************************************************************
[WARNING]: Consider using the get_url or uri module rather than running 'curl'.  If you need to use command because get_url or uri is insufficient you can add 'warn: false' to this command
task or set 'command_warnings=False' in ansible.cfg to get rid of this message.
changed: [master.mylabserver.com]
changed: [worker2.mylabserver.com]
changed: [worker1.mylabserver.com]

TASK [k8s-config : Add Kubernetes repository] ***********************************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [master.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [k8s-config : Update apt repositories] *************************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [worker2.mylabserver.com]
changed: [master.mylabserver.com]

TASK [k8s-config : Install Kubernetes components] *******************************************************************************************************************************************
ok: [worker1.mylabserver.com] => (item=kubeadm)
ok: [master.mylabserver.com] => (item=kubeadm)
ok: [worker2.mylabserver.com] => (item=kubeadm)
ok: [worker1.mylabserver.com] => (item=kubelet)
ok: [master.mylabserver.com] => (item=kubelet)
ok: [worker2.mylabserver.com] => (item=kubelet)
ok: [worker1.mylabserver.com] => (item=kubectl)
ok: [worker2.mylabserver.com] => (item=kubectl)
ok: [master.mylabserver.com] => (item=kubectl)

Initialize the Cluster on the Master Node

root@06432e7f921c:~/k8s-install# ansible-playbook main.yaml --tags master
PLAY [master] *******************************************************************************************************************************************************************************

TASK [master-config : Initialize Kubernetes Cluster] ****************************************************************************************************************************************
changed: [master.mylabserver.com]

TASK [master-config : Deploy Calico network] ************************************************************************************************************************************************
changed: [master.mylabserver.com]

TASK [master-config : Generate and save cluster join command to /joincluster.sh] ************************************************************************************************************
changed: [master.mylabserver.com]

PLAY RECAP **********************************************************************************************************************************************************************************
master.mylabserver.com     : ok=26   changed=12   unreachable=0    failed=0    skipped=1    rescued=0    ignored=0

Join the Worker Nodes to the Cluster

root@06432e7f921c:~/k8s-install# ansible-playbook main.yaml --tags worker

PLAY [worker] *******************************************************************************************************************************************************************************

TASK [worker-config : Copy joincluster script to worker nodes] ******************************************************************************************************************************
ok: [worker1.mylabserver.com]
ok: [worker2.mylabserver.com]

TASK [worker-config : Execute joincluster.sh script] ****************************************************************************************************************************************
changed: [worker1.mylabserver.com]
changed: [worker2.mylabserver.com]

PLAY RECAP **********************************************************************************************************************************************************************************
worker1.mylabserver.com    : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0
worker2.mylabserver.com    : ok=2    changed=1    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0

On the master node, create a .kube directory and copy admin.conf into it

root@06432e7f921c:~/k8s-install# mkdir /root/.kube
root@06432e7f921c:~/k8s-install# cp /etc/kubernetes/admin.conf /root/.kube/config

Let's verify the cluster is configured properly

root@06432e7f921c:~/k8s-install# kubectl get nodes
NAME                      STATUS   ROLES           AGE   VERSION
master.mylabserver.com    Ready    control-plane   54m   v1.29.3
worker1.mylabserver.com   Ready    <none>          43m   v1.29.3
worker2.mylabserver.com   Ready    <none>          43m   v1.29.3

Conclusion

Automating the installation of Kubernetes using Ansible can significantly simplify the process and reduce the chance of errors. With Ansible playbooks, we can quickly provision and configure Kubernetes clusters on any infrastructure, whether it's on-premises or in the cloud.

In this blog post, we've only scratched the surface of what's possible with Ansible. As you become more familiar with Ansible and Kubernetes, you can further customize your playbooks to meet your specific requirements and integrate additional automation tasks.

Happy automating!

Maximizing CI/CD Efficiency: A Guide to GitLab Runner Setup as a Self-Hosted Runner on Kubernetes

Introduction

GitLab Runner is an open-source project that works in conjunction with GitLab CI/CD pipelines to automate the process of building, testing, and deploying software. It can run various types of jobs, including builds, tests, and deployments, based on the instructions provided in the .gitlab-ci.yml configuration file.

Why Use GitLab Runner on Kubernetes?

Scalability: Kubernetes allows for easy scaling of resources. GitLab Runner deployments can dynamically scale based on workload demands, ensuring optimal resource utilization.

Isolation: Kubernetes provides container orchestration, allowing GitLab Runner jobs to run in isolated environments (pods). This isolation ensures that jobs do not interfere with each other and provides security benefits.

Resource Efficiency: GitLab Runner on Kubernetes can efficiently utilize cluster resources by scheduling jobs on available nodes, thereby maximizing resource utilization and minimizing idle capacity.

Consistency: Running GitLab Runners on Kubernetes ensures consistency across different environments, whether it's development, testing, or production. The same Kubernetes environment can be used to run CI/CD pipelines consistently.

Key Components

GitLab Runner: The agent responsible for executing CI/CD jobs defined in .gitlab-ci.yml files. It interacts with GitLab CI/CD and Kubernetes API to schedule and run jobs in Kubernetes pods.

Kubernetes: An open-source container orchestration platform that automates the deployment, scaling, and management of containerized applications. GitLab Runner utilizes Kubernetes to manage the lifecycle of CI/CD job pods.

Helm: Helm is a package manager for Kubernetes that allows you to define, install, and manage applications on Kubernetes. GitLab Runner can be deployed on Kubernetes using Helm charts provided by GitLab.

Install GitLab Runner on Kubernetes

1- Add the Helm repo

root@master:~# helm repo add gitlab https://charts.gitlab.io
"gitlab" already exists with the same configuration, skipping

2- Update the repo

root@master:~# helm repo update gitlab
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "gitlab" chart repository
Update Complete. ⎈Happy Helming!⎈

3- Create a values.yaml file

root@master:~# cat values.yaml
gitlabUrl: https://gitlab.com/

runnerRegistrationToken: "gitlab-runner-token"

concurrent: 10

checkInterval: 30

rbac:
  create: true
  rules:
    - apiGroups: [""]
      resources: ["pods"]
      verbs: ["list", "get", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["pods/exec"]
      verbs: ["create"]
    - apiGroups: [""]
      resources: ["pods/log"]
      verbs: ["get"]
    - apiGroups: [""]
      resources: ["pods/attach"]
      verbs: ["list", "get", "create", "delete", "update"]
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["list", "get", "create", "delete", "update"]
    - apiGroups: [""]
      resources: ["configmaps"]
      verbs: ["list", "get", "create", "delete", "update"]

runners:
  privileged: true

  config: |
    [[runners]]
      [runners.kubernetes]
        namespace = "gitlab-runner"
        tls_verify = false
        image = "docker:19"
        privileged = false

4- Create the namespace and deploy the Helm chart

root@master:~# kubectl create ns gitlab-runner

root@master:~# helm install gitlab-runner gitlab/gitlab-runner -f values.yaml --namespace gitlab-runner
NAME: gitlab-runner
LAST DEPLOYED: Mon Mar  4 22:09:02 2024
NAMESPACE: gitlab-runner
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
Your GitLab Runner should now be registered against the GitLab instance reachable at: "https://gitlab.com/"

5- Verify the runner

root@master:~# kubectl -n gitlab-runner get pods
NAME                             READY   STATUS    RESTARTS   AGE
gitlab-runner-7b8ff76bff-mptdc   1/1     Running   0          39s

Log in to https://gitlab.com and verify the runner registration.

Create a project in GitLab.

Create a .gitlab-ci.yml and use kubernetes as the tag so the pipeline runs on Kubernetes; a minimal sketch follows.
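
Here is a minimal sketch of such a pipeline; the image and script are placeholders, and the kubernetes tag is assumed to match the tag the runner was registered with:

stages:
  - test

test-job:
  stage: test
  tags:
    - kubernetes   # assumed to match the runner's registered tag
  image: alpine:latest
  script:
    - echo "Running on the self-hosted Kubernetes runner"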

Run the Pipeline

Run the command below to verify that a new runner pod starts in the gitlab-runner namespace:

root@master:~# kubectl -n gitlab-runner get pods
NAME                                                      READY   STATUS     RESTARTS   AGE
gitlab-runner-7b8ff76bff-mptdc                            1/1     Running    0          18m
runner-kea6jzghg-project-45006412-concurrent-0-1heswzap   0/2     Init:0/1   0          56s

Verify the pipeline in the GitLab UI.

We are now able to run our pipeline jobs on our self-hosted runner in Kubernetes.

Streamlining Configuration Management with Ansible Semaphore's Intuitive UI

Configuration Management Tool: Ansible

Ansible is a powerful configuration management tool that enables organizations to automate the provisioning, configuration, and management of IT infrastructure in a simple, efficient, and consistent manner. It uses a declarative language and follows the Infrastructure as Code (IaC) paradigm, allowing administrators to define infrastructure configurations in human-readable YAML files.

Key features and benefits of Ansible as a configuration management tool include:

Agentless Architecture: Ansible operates in an agentless manner, which means it doesn't require any software to be installed on managed nodes. This simplifies deployment and reduces overhead, making it easy to manage a large number of systems.

Simple and Human-Readable Syntax: Ansible playbooks, written in YAML, are easy to read, write, and understand. This enables infrastructure configurations to be managed efficiently by both system administrators and developers.

Idempotent Execution: Ansible ensures idempotent execution, meaning that running the same playbook multiple times will result in the same desired state, regardless of the initial state of the system. This ensures consistency and predictability in configuration management tasks. A short example follows this list.

Module-Based Architecture: Ansible provides a wide range of modules that abstract common system administration tasks, such as package management, file manipulation, user management, and service control. These modules can be used to configure various aspects of the infrastructure without the need for custom scripting.

Role-Based Organization: Ansible allows administrators to organize playbooks and tasks into reusable units called roles. Roles promote modularity, code reuse, and maintainability, making it easier to manage complex infrastructure configurations.

Integration and Extensibility: Ansible integrates seamlessly with existing tools, systems, and processes, allowing for easy integration into CI/CD pipelines, monitoring systems, and other automation workflows. Additionally, Ansible's modular architecture makes it extensible, allowing users to develop custom modules and plugins to extend its functionality.

Community and Ecosystem: Ansible benefits from a large and active community of users, contributors, and developers. This vibrant ecosystem provides access to a wealth of pre-built roles, modules, and playbooks through Ansible Galaxy, accelerating development and reducing time to value.
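
To make the idempotence point concrete, here is a minimal example: the first run may report "changed", but every later run reports "ok" because the package is already present:

- name: Ensure nginx is installed (safe to re-run)
  apt:
    name: nginx
    state: present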

Ways of Using Ansible

We can use Ansible in two ways: 1) control node/managed nodes, and 2) Ansible Tower/AWX (UI-based, helpful for managing a large organization centrally).

In this blog I will discuss the first way, where one control node pushes code to all managed nodes. This approach works well for a small organization, but it has a drawback: there is no history of successful and failed jobs, and an admin has to log in to the control node (or use a CI/CD tool) to run a playbook.

Ansible Semaphore addresses this: it lets us run our playbooks from a UI and keep track of successful and failed jobs.

Configure Ansible Semaphore on your control node.

Installing Semaphore using Snap

root@master:~# apt install snapd

root@master:~# snap install semaphore

root@master:~# snap stop semaphore

Add an admin user to log in to the Semaphore UI

root@master:~# sudo semaphore user add --admin --login amit --name="Amit Chaturvedi" --email=test@gmail.com --password=1234

root@master:~# snap start semaphore

Let's log in to the Semaphore UI.

Use the username and password created in the command above.

We land on a page that asks us to create a new project.

Click on New Project and create a new project.

I keep my playbooks in a GitHub repo.

Let's run the playbook from the UI.

Step-1: Create a Key Store entry to hold the credentials used to clone the Git repo.

Click on Key Store.

Click on New Key.

In Username/Password, provide the PAT (personal access token) created in your GitHub account.

Select the type and create the key.

Step-2: Create an Environment.

I am not passing any extra variables, so I keep the Extra Variables field empty.

Create the environment.

Step-3: Create an Inventory.

Click on Create Inventory.

We have to provide the inventory, i.e. the list of managed nodes.

There are three inventory types: 1) via file, 2) static, and 3) static YAML. I am using the file type; a hypothetical example of such a file is shown below.
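
The file type points at an inventory file that already exists on the Semaphore host; a hypothetical example (the path and host names are placeholders):

# /etc/ansible/inventory
[managed]
node1.mylabserver.com
node2.mylabserver.com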

Click on Create Inventory to save it.

Step-4: Create the Repository entry pointing to where the code lives, so Semaphore can clone the repo locally.

Provide the details and save it

Step-5: Create a Task Template.

Click on Create New Template.

Provide the details of your repo, credentials, and other settings.

Let's run the template and see the magic.

Click on Run

If everything is configured properly, your playbook will run successfully.

Now click on Dashboard to see the status.

I hope you now see how easy it is to manage Ansible using Semaphore.

Accelerating Kubernetes Operations with the Kubernetes Go Client

Introduction

Explore the powerful combination of Kubernetes and Go programming in this blog post. Learn how the Kubernetes Go client empowers developers to seamlessly interact with Kubernetes clusters, providing a flexible and efficient way to automate operations.

Prerequisites:

- A Kubernetes cluster running on minikube, AKS, EKS, or GKE

- Go installed on your machine

- A basic understanding of Go programming

- The Go client repo (k8s.io/client-go)

API Resources, Kinds, and Objects

Resource Type: In Kubernetes, a "Resource Type" refers to a specific kind of object or entity that can be managed within the Kubernetes cluster. Kubernetes uses a declarative model where users define the desired state of their applications or infrastructure in the form of YAML or JSON manifests. These manifests describe the configuration of various resource types, and Kubernetes is responsible for ensuring that the actual state of the cluster matches the desired state.

API Group: In Kubernetes, an API group is a way to organize and categorize related sets of APIs. The Kubernetes API is designed to be extensible, and API groups help manage the complexity of the API surface by grouping related resources together. The structure of a Kubernetes API endpoint typically follows the pattern:

/<API_GROUP>/<API_VERSION>/...

Here: API_GROUP: Identifies the group to which a particular resource belongs. API_VERSION: Specifies the version of the API.
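
For example, the core group is served under /api/v1 (its group name is the empty string), while named groups live under /apis:

/api/v1/namespaces                             # core group, version v1
/apis/apps/v1/namespaces/default/deployments   # group "apps", version v1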

Object: In Kubernetes, an "object" is a basic building block or unit of the system. Objects represent the state of the cluster and can be created, modified, or deleted to manage applications and other aspects of the system. Objects are defined in Kubernetes manifests, typically written in YAML or JSON, and are submitted to the Kubernetes API server for processing.
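
For example, a minimal Pod object manifest looks like this; every object carries apiVersion, kind, and metadata (the image here is just a placeholder):

apiVersion: v1        # core API group, version v1
kind: Pod             # the Kind: which resource type this object is
metadata:
  name: example-pod
  namespace: default
spec:
  containers:
    - name: app
      image: nginx:1.25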

Kind: In Kubernetes, "Kind" is a top-level field of a Kubernetes object that specifies the type or kind of the object. It is a required field and defines the type of resource being created, modified, or interacted with in the cluster. The kind field indicates the object's role and how it should be handled by Kubernetes.

Module k8s.io/client-go

The "k8s.io/client-go" module is the official Go client library for Kubernetes. It provides a set of packages and utilities to interact with the Kubernetes API and perform operations such as creating, updating, and deleting resources, as well as watching for changes in the cluster. To use the Kubernetes Go client libraries in your Go application, you typically import specific packages from "k8s.io/client-go".

Module k8s.io/apimachinery

The k8s.io/apimachinery module in Go is part of the Kubernetes Go client libraries and provides a set of packages for working with Kubernetes objects and their metadata. It includes functionality for handling object serialization, conversion, and various utility functions related to Kubernetes API objects. Key packages within k8s.io/apimachinery include:

metav1: This package provides types and functions for working with metadata in Kubernetes objects, such as labels, annotations, and timestamps.

runtime: The runtime package defines interfaces and functions for working with generic Kubernetes runtime objects. It includes serialization, encoding, and decoding functionality.

util: The util package contains utility functions for working with Kubernetes objects, such as conversion functions and label/selector matching.

schema: The schema package defines types and functions related to the schema of Kubernetes objects. It includes functionalities like OpenAPI validation and schema generation.

Here is an example of how k8s.io/apimachinery might be used in conjunction with k8s.io/client-go:

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // (clientset setup omitted; see the main examples below)

    // Example: Working with metav1.ObjectMeta
    labels := map[string]string{"app": "example-app", "env": "production"}
    annotations := map[string]string{"description": "An example pod"}
    objectMeta := metav1.ObjectMeta{
        Name:        "example-pod",
        Namespace:   "default",
        Labels:      labels,
        Annotations: annotations,
    }
    fmt.Printf("ObjectMeta: %+v\n", objectMeta)

    // Example: Using metav1.Time for timestamps
    creationTimestamp := metav1.Time{Time: time.Now()} // substitute your own timestamp
    fmt.Printf("Creation Timestamp: %v\n", creationTimestamp)
}

Ways of using the Go client to connect to Kubernetes:

- Authenticating inside the cluster
- Authenticating outside the cluster

In this blog I am using the "authenticating outside the cluster" approach.
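
For contrast, here is a minimal sketch of the in-cluster variant (not used in the rest of this post); it reads the service account credentials Kubernetes mounts into every pod:

package main

import (
        "fmt"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
)

func main() {
        // Reads the token and CA bundle mounted at
        // /var/run/secrets/kubernetes.io/serviceaccount inside a pod.
        config, err := rest.InClusterConfig()
        if err != nil {
                panic(err.Error())
        }
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        fmt.Printf("clientset ready: %T\n", clientset)
}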

Demo

- Install Go

# wget https://go.dev/dl/go1.21.5.linux-amd64.tar.gz
# tar -C /usr/local -xzf go1.21.5.linux-amd64.tar.gz
# export PATH=$PATH:/usr/local/go/bin
# go version

- Create a project directory and initialize a Go module (we will add main.go in the next step)

 # mkdir client-go-example
 # cd client-go-example/
 # go mod init client-go-example

- Let's use the Go client to connect to the k8s cluster and list the nodes

# vim main.go
package main
import (
        "context"
        "flag"
        "fmt"
        "path/filepath"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()
        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }
        fmt.Println("Nodes in the cluster:")
        for _, node := range nodeList.Items {
                fmt.Printf("  %s\n", node.GetName())
        }
}

# go mod tidy
# go build
# go run main.go

- Let's connect to the k8s cluster and list the namespaces deployed in it

# vim main.go
package main

import (
        "context"
        "flag"
        "fmt"
        "path/filepath"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ListNameSpaces(coreClient kubernetes.Interface) {
        nsList, err := coreClient.CoreV1().
                Namespaces().
                List(context.Background(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }

        for _, n := range nsList.Items {
                fmt.Printf("  %s\n", n.Name)
        }
}
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()

        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        ListNameSpaces(clientset)
}

# go run main.go

- Let's connect to the k8s cluster and list the pods deployed in each namespace

package main

import (
        "context"
        "flag"
        "fmt"
        "path/filepath"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/client-go/util/homedir"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ListNameSpaces(coreClient kubernetes.Interface) {
        nsList, err := coreClient.CoreV1().
                Namespaces().
                List(context.Background(), metav1.ListOptions{})
        if err != nil {
                panic(err.Error())
        }

        for _, n := range nsList.Items {
               ListPods(coreClient, n.Name)
        }
}

func ListPods(coreClient kubernetes.Interface, namespace string) {
        fmt.Printf("Pods in namespace: %s\n", namespace)
        pods, err := coreClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
        if err != nil {
                fmt.Printf("Error getting pods in namespace %s: %v\n", namespace, err)
                return // avoid dereferencing a nil pod list on error
        }
        for _, pod := range pods.Items {
                fmt.Printf("  %s\n", pod.Name)
        }
        fmt.Println()
}
func main() {
        var kubeconfig *string
        if home := homedir.HomeDir(); home != "" {
                kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
        } else {
                kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
        }
        flag.Parse()

        // use the current context in kubeconfig
        config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
        if err != nil {
                panic(err.Error())
        }

        // create the clientset
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
                panic(err.Error())
        }
        ListNameSpaces(clientset)
}

# go run main.go

These are some examples of how you can communicate with the Kubernetes cluster using the Go client. In the next blog we will see how to mutate Kubernetes API objects.