@@ -6,7 +6,6 @@
|
||||
set -e
|
||||
|
||||
# --- Détermination du rôle du nœud ---
|
||||
# Priorité : option --role > argument positionnel > autodetect via hostname
|
||||
NODE_ROLE=""
|
||||
|
||||
usage() {
|
||||
@@ -17,7 +16,6 @@ usage() {
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Parsing des arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--role)
|
||||
@@ -37,14 +35,13 @@ while [[ $# -gt 0 ]]; do
|
||||
esac
|
||||
done
|
||||
|
||||
# Autodetect si aucun rôle fourni
|
||||
if [[ -z "$NODE_ROLE" ]]; then
|
||||
if [[ "$(hostname)" == *"master"* ]] || [[ "$(hostname)" == *"control"* ]]; then
|
||||
NODE_ROLE="master"
|
||||
echo "Autodetect: rôle 'master' détecté via hostname ($(hostname))"
|
||||
else
|
||||
NODE_ROLE="worker"
|
||||
echo "Autodetect: rôle 'worker' (hostname: $(hostname) ne contient pas 'master'/'control')"
|
||||
echo "Autodetect: rôle 'worker' (hostname: $(hostname))"
|
||||
fi
|
||||
else
|
||||
echo "Rôle: '$NODE_ROLE' (explicite)"
|
||||
@@ -53,75 +50,113 @@ fi
|
||||
echo "=== Installation des pré-requis Kubernetes sur CentOS 10 [rôle: $NODE_ROLE] ==="
|
||||
|
||||
# Désactiver le swap (requis par Kubernetes)
|
||||
# POURQUOI: Kubernetes doit connaître la RAM réelle disponible pour le scheduling.
|
||||
# Le swap introduit des latences imprévisibles et fausse les limites mémoire des pods.
|
||||
echo "Désactivation du swap..."
|
||||
sudo swapoff -a
|
||||
sudo sed -i '/ swap / s/^/#/' /etc/fstab
|
||||
|
||||
# Désactiver SELinux (mode permissive pour le TD)
|
||||
# POURQUOI: Simplifie le troubleshooting. En production, utiliser SELinux enforcing
|
||||
# avec les policies container-selinux appropriées.
|
||||
echo "Configuration de SELinux en mode permissive..."
|
||||
sudo setenforce 0 || true
|
||||
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
|
||||
# Installer container-selinux AVANT containerd pour que les labels SELinux soient corrects
|
||||
# POURQUOI: container-selinux fournit la policy qui confine chaque container dans le label
|
||||
# container_t — un container compromis ne peut pas lire /etc/passwd ni écrire
|
||||
# dans les répertoires système de l'hôte, même en root dans le container.
|
||||
echo "Installation de container-selinux..."
|
||||
sudo dnf install -y container-selinux
|
||||
|
||||
# Maintenir SELinux en mode enforcing
|
||||
# POURQUOI: Le mode permissive ne fait que logger les violations sans les bloquer —
|
||||
# c'est une fausse sécurité. En enforcing + container-selinux, les appels
|
||||
# système non autorisés depuis les containers sont bloqués au niveau kernel.
|
||||
echo "Vérification SELinux en mode enforcing..."
|
||||
sudo setenforce 1 || true
|
||||
SELINUX_CURRENT=$(getenforce 2>/dev/null || echo "Unknown")
|
||||
if [[ "$SELINUX_CURRENT" != "Enforcing" ]]; then
|
||||
echo "ATTENTION: SELinux n'est pas en mode Enforcing (actuel: $SELINUX_CURRENT)"
|
||||
echo "Vérifier /etc/selinux/config — SELINUX doit être 'enforcing'"
|
||||
fi
|
||||
|
||||
# Charger les modules kernel nécessaires
|
||||
# POURQUOI:
|
||||
# - overlay: Système de fichiers pour les layers des containers (utilisé par containerd)
|
||||
# - br_netfilter: Permet à iptables/nftables de voir le trafic bridgé (nécessaire pour CNI)
|
||||
# overlay, br_netfilter : requis par containerd et le CNI
|
||||
# ip_vs, ip_vs_rr/rr/wrr/sh : requis par Cilium en mode kube-proxy replacement (IPVS)
|
||||
# nf_conntrack : suivi des connexions réseau (requis par iptables/eBPF)
|
||||
echo "Configuration des modules kernel..."
|
||||
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
|
||||
overlay
|
||||
br_netfilter
|
||||
ip_vs
|
||||
ip_vs_rr
|
||||
ip_vs_wrr
|
||||
ip_vs_sh
|
||||
nf_conntrack
|
||||
EOF
|
||||
|
||||
sudo modprobe overlay
|
||||
sudo modprobe br_netfilter
|
||||
sudo modprobe ip_vs
|
||||
sudo modprobe ip_vs_rr
|
||||
sudo modprobe ip_vs_wrr
|
||||
sudo modprobe ip_vs_sh
|
||||
sudo modprobe nf_conntrack 2>/dev/null || sudo modprobe nf_conntrack_ipv4 2>/dev/null || true
|
||||
|
||||
# Configuration sysctl pour le réseau
|
||||
# POURQUOI:
|
||||
# - bridge-nf-call-iptables: Le trafic bridgé passe par iptables (requis par kube-proxy)
|
||||
# - ip_forward: Active le routage IP entre interfaces (requis par CNI pour pod-to-pod)
|
||||
# Configuration sysctl
|
||||
# bridge-nf-call-iptables/ip6tables : trafic bridgé traité par iptables (requis CNI)
|
||||
# ip_forward : routage IP entre interfaces (requis pod-to-pod)
|
||||
# rp_filter=0 : Cilium eBPF requiert que le reverse path filtering soit désactivé
|
||||
# inotify params : requis par Cilium pour surveiller les changements de configuration
|
||||
echo "Configuration sysctl..."
|
||||
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
|
||||
net.bridge.bridge-nf-call-iptables = 1
|
||||
net.bridge.bridge-nf-call-ip6tables = 1
|
||||
net.ipv4.ip_forward = 1
|
||||
net.ipv4.conf.all.rp_filter = 0
|
||||
fs.inotify.max_user_watches = 524288
|
||||
fs.inotify.max_user_instances = 512
|
||||
EOF
|
||||
|
||||
sudo sysctl --system
|
||||
|
||||
# Désactiver firewalld
|
||||
# POURQUOI: Sur Exoscale (et la plupart des cloud providers), le filtrage réseau est
|
||||
# géré par les security groups au niveau hyperviseur. firewalld actif en doublon
|
||||
# bloque le trafic inter-pods (VXLAN UDP 4789, BGP TCP 179, etc.) localement
|
||||
# sur le nœud, même si le security group l'autorise.
|
||||
echo "Désactivation de firewalld (géré par le security group Exoscale)..."
|
||||
sudo systemctl stop firewalld 2>/dev/null || true
|
||||
sudo systemctl disable firewalld 2>/dev/null || true
|
||||
echo " ✓ firewalld désactivé"
|
||||
# Configurer firewalld avec les règles Kubernetes
|
||||
# POURQUOI: Désactiver firewalld entièrement supprime toute isolation réseau au niveau hôte.
|
||||
# Si un pod bypass le CNI (via un exploit), firewalld local est la dernière barrière.
|
||||
# Defense in depth : security group cloud + firewalld hôte + NetworkPolicy CNI.
|
||||
echo "Configuration de firewalld avec les règles Kubernetes..."
|
||||
sudo systemctl enable firewalld
|
||||
sudo systemctl start firewalld
|
||||
|
||||
if [[ "$NODE_ROLE" == "master" ]]; then
|
||||
echo " Règles control plane..."
|
||||
sudo firewall-cmd --permanent --add-port=6443/tcp # API server
|
||||
sudo firewall-cmd --permanent --add-port=2379-2380/tcp # etcd client + peer
|
||||
sudo firewall-cmd --permanent --add-port=10257/tcp # kube-controller-manager
|
||||
sudo firewall-cmd --permanent --add-port=10259/tcp # kube-scheduler
|
||||
fi
|
||||
|
||||
echo " Règles communes (kubelet + réseau)..."
|
||||
sudo firewall-cmd --permanent --add-port=10250/tcp # kubelet API
|
||||
sudo firewall-cmd --permanent --add-port=30000-32767/tcp # NodePort services
|
||||
sudo firewall-cmd --permanent --add-port=53/tcp # DNS (CoreDNS)
|
||||
sudo firewall-cmd --permanent --add-port=53/udp # DNS (CoreDNS)
|
||||
|
||||
echo " Règles Cilium..."
|
||||
sudo firewall-cmd --permanent --add-port=4240/tcp # Cilium health checks
|
||||
sudo firewall-cmd --permanent --add-port=4244/tcp # Hubble server
|
||||
sudo firewall-cmd --permanent --add-port=4245/tcp # Hubble relay
|
||||
sudo firewall-cmd --permanent --add-port=51871/udp # WireGuard (chiffrement inter-nœuds)
|
||||
sudo firewall-cmd --permanent --add-port=8472/udp # VXLAN (fallback)
|
||||
|
||||
sudo firewall-cmd --permanent --add-masquerade
|
||||
sudo firewall-cmd --reload
|
||||
echo " ✓ firewalld configuré"
|
||||
|
||||
# Installation de containerd depuis le repo Docker
|
||||
# POURQUOI: containerd.io du repo Docker est plus récent et mieux maintenu que celui des repos CentOS
|
||||
echo "Ajout du repo Docker pour containerd..."
|
||||
sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
|
||||
echo "Installation de containerd..."
|
||||
sudo dnf install -y containerd.io
|
||||
|
||||
# Configuration de containerd
|
||||
sudo mkdir -p /etc/containerd
|
||||
containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
|
||||
|
||||
# Activer systemd cgroup driver (OBLIGATOIRE pour kubeadm)
|
||||
# POURQUOI: CentOS 10 utilise systemd comme init system et gestionnaire de cgroups v2.
|
||||
# Le kubelet utilise aussi systemd cgroup driver par défaut depuis K8s 1.22+.
|
||||
# Si containerd et kubelet utilisent des drivers différents (cgroupfs vs systemd):
|
||||
# - Conflits de gestion mémoire
|
||||
# - Pods qui ne démarrent pas ou crashent
|
||||
# - Métriques incorrectes
|
||||
# - Comportement OOM imprévisible
|
||||
echo "Activation du driver cgroup systemd pour containerd..."
|
||||
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
|
||||
|
||||
@@ -144,31 +179,48 @@ if [[ "$NODE_ROLE" == "master" ]]; then
|
||||
echo "Installation de kubeadm, kubelet, kubectl (control plane)..."
|
||||
sudo dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
|
||||
else
|
||||
echo "Installation de kubeadm, kubelet (worker — kubectl non requis)..."
|
||||
echo "Installation de kubeadm, kubelet (worker)..."
|
||||
sudo dnf install -y kubelet kubeadm --disableexcludes=kubernetes
|
||||
fi
|
||||
|
||||
# Verrouiller les versions pour éviter les mises à jour accidentelles
|
||||
# POURQUOI: Une mise à jour non planifiée de kubelet peut casser le cluster.
|
||||
# L'upgrade doit être fait de manière contrôlée via kubeadm upgrade.
|
||||
echo "Verrouillage des versions..."
|
||||
sudo dnf install -y 'dnf-command(versionlock)' 2>/dev/null || true
|
||||
if [[ "$NODE_ROLE" == "master" ]]; then
|
||||
sudo dnf versionlock add kubelet kubeadm kubectl 2>/dev/null || echo "Note: versionlock non disponible, pensez à surveiller les mises à jour"
|
||||
sudo dnf versionlock add kubelet kubeadm kubectl 2>/dev/null || echo "Note: versionlock non disponible"
|
||||
else
|
||||
sudo dnf versionlock add kubelet kubeadm 2>/dev/null || echo "Note: versionlock non disponible, pensez à surveiller les mises à jour"
|
||||
sudo dnf versionlock add kubelet kubeadm 2>/dev/null || echo "Note: versionlock non disponible"
|
||||
fi
|
||||
|
||||
# Installer Helm sur le master (requis pour Cilium, KubeArmor, Kyverno)
|
||||
if [[ "$NODE_ROLE" == "master" ]]; then
|
||||
if ! command -v helm &>/dev/null; then
|
||||
echo "Installation de Helm..."
|
||||
HELM_VERSION="v3.17.3"
|
||||
curl -L "https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz" -o /tmp/helm.tar.gz
|
||||
tar -xzf /tmp/helm.tar.gz -C /tmp
|
||||
sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm
|
||||
rm -rf /tmp/linux-amd64 /tmp/helm.tar.gz
|
||||
echo " ✓ Helm ${HELM_VERSION} installé"
|
||||
else
|
||||
echo " ✓ Helm déjà installé: $(helm version --short)"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Activer kubelet
|
||||
sudo systemctl enable kubelet
|
||||
|
||||
echo ""
|
||||
echo "=== Vérifications ==="
|
||||
echo "Swap désactivé: $(free -h | grep Swap | awk '{print $2}') (doit être 0)"
|
||||
echo "SELinux: $(getenforce)"
|
||||
echo "Modules kernel: $(lsmod | grep -E 'overlay|br_netfilter' | wc -l)/2 chargés"
|
||||
SELINUX_STATUS=$(getenforce 2>/dev/null || echo "Unknown")
|
||||
if [[ "$SELINUX_STATUS" == "Enforcing" ]]; then
|
||||
echo "SELinux: $SELINUX_STATUS ✓"
|
||||
else
|
||||
echo "SELinux: $SELINUX_STATUS ⚠ ATTENTION: doit être Enforcing"
|
||||
fi
|
||||
echo "Modules kernel: $(lsmod | grep -E 'overlay|br_netfilter|ip_vs|nf_conntrack' | wc -l)/7 chargés"
|
||||
echo "firewalld: $(systemctl is-active firewalld)"
|
||||
echo "containerd: $(systemctl is-active containerd)"
|
||||
echo "SystemdCgroup: $(grep 'SystemdCgroup = true' /etc/containerd/config.toml > /dev/null && echo 'activé' || echo 'ATTENTION: non activé!')"
|
||||
echo "SystemdCgroup: $(grep 'SystemdCgroup = true' /etc/containerd/config.toml > /dev/null && echo 'activé ✓' || echo 'ATTENTION: non activé!')"
|
||||
echo ""
|
||||
echo "✓ Pré-requis installés avec succès!"
|
||||
echo "Version kubeadm: $(kubeadm version -o short)"
|
||||
@@ -176,3 +228,6 @@ echo "Version kubelet: $(kubelet --version)"
|
||||
if command -v kubectl &>/dev/null; then
|
||||
echo "Version kubectl: $(kubectl version --client -o yaml | grep gitVersion)"
|
||||
fi
|
||||
if command -v helm &>/dev/null; then
|
||||
echo "Version helm: $(helm version --short)"
|
||||
fi
|
||||
|
||||
@@ -4,26 +4,258 @@
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Initialisation du Control Plane Kubernetes ==="
|
||||
echo "=== Initialisation du Control Plane Kubernetes (hardened) ==="
|
||||
|
||||
# Définir le réseau pod (utilisé par Flannel)
|
||||
POD_NETWORK_CIDR="10.244.0.0/16"
|
||||
APISERVER_IP=$(hostname -I | awk '{print $1}')
|
||||
POD_CIDR="10.244.0.0/16"
|
||||
SERVICE_CIDR="10.96.0.0/12"
|
||||
|
||||
echo "Initialisation de kubeadm avec le réseau pod $POD_NETWORK_CIDR..."
|
||||
sudo kubeadm init --pod-network-cidr=$POD_NETWORK_CIDR --apiserver-advertise-address=$(hostname -I | awk '{print $1}')
|
||||
echo "IP API server : $APISERVER_IP"
|
||||
echo "Pod CIDR : $POD_CIDR"
|
||||
echo "Service CIDR : $SERVICE_CIDR"
|
||||
echo ""
|
||||
|
||||
# --- Audit logging ---
|
||||
echo "Création de la politique d'audit..."
|
||||
sudo mkdir -p /var/log/kubernetes/audit
|
||||
sudo mkdir -p /etc/kubernetes/audit
|
||||
|
||||
sudo tee /etc/kubernetes/audit/audit-policy.yaml > /dev/null <<'EOF'
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
omitStages:
|
||||
- RequestReceived
|
||||
rules:
|
||||
# Tracer tous les accès aux secrets et configmaps (données sensibles)
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["secrets", "configmaps"]
|
||||
|
||||
# Tracer les modifications RBAC (vecteur d'escalade de privilèges)
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: "rbac.authorization.k8s.io"
|
||||
resources: ["clusterroles", "clusterrolebindings", "roles", "rolebindings"]
|
||||
|
||||
# Tracer exec/portforward/attach (accès interactif aux pods — vecteur d'attaque courant)
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["pods/exec", "pods/portforward", "pods/attach"]
|
||||
|
||||
# Tracer toutes les créations/suppressions/modifications (niveau Metadata pour réduire le volume)
|
||||
- level: Metadata
|
||||
verbs: ["create", "delete", "patch", "update"]
|
||||
|
||||
# Ignorer le bruit des health checks et composants systèmes
|
||||
- level: None
|
||||
users: ["system:kube-proxy"]
|
||||
verbs: ["watch"]
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["endpoints", "services", "services/status"]
|
||||
- level: None
|
||||
users: ["system:apiserver"]
|
||||
verbs: ["get"]
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["namespaces"]
|
||||
- level: None
|
||||
nonResourceURLs: ["/healthz*", "/readyz*", "/livez*", "/metrics"]
|
||||
|
||||
# Défaut : niveau Metadata pour tout le reste
|
||||
- level: Metadata
|
||||
EOF
|
||||
|
||||
# --- Chiffrement etcd at-rest ---
|
||||
echo "Génération de la clé de chiffrement etcd..."
|
||||
sudo mkdir -p /etc/kubernetes/encryption
|
||||
|
||||
ENCRYPTION_KEY=$(dd if=/dev/urandom bs=32 count=1 2>/dev/null | base64)
|
||||
|
||||
sudo tee /etc/kubernetes/encryption/encryption-config.yaml > /dev/null <<EOF
|
||||
apiVersion: apiserver.config.k8s.io/v1
|
||||
kind: EncryptionConfiguration
|
||||
resources:
|
||||
- resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
providers:
|
||||
- aescbc:
|
||||
keys:
|
||||
- name: key1
|
||||
secret: ${ENCRYPTION_KEY}
|
||||
- identity: {}
|
||||
EOF
|
||||
sudo chmod 600 /etc/kubernetes/encryption/encryption-config.yaml
|
||||
echo " ✓ Clé AES-CBC générée (32 bytes, base64)"
|
||||
|
||||
# --- Config admission controllers ---
|
||||
echo "Création de la configuration des admission controllers..."
|
||||
sudo mkdir -p /etc/kubernetes/admission
|
||||
|
||||
sudo tee /etc/kubernetes/admission/admission-config.yaml > /dev/null <<'EOF'
|
||||
apiVersion: apiserver.config.k8s.io/v1
|
||||
kind: AdmissionConfiguration
|
||||
plugins:
|
||||
- name: EventRateLimit
|
||||
configuration:
|
||||
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
|
||||
kind: Configuration
|
||||
limits:
|
||||
- type: Namespace
|
||||
qps: 50
|
||||
burst: 100
|
||||
cacheSize: 2000
|
||||
- type: User
|
||||
qps: 10
|
||||
burst: 50
|
||||
- name: PodSecurity
|
||||
configuration:
|
||||
apiVersion: pod-security.admission.config.k8s.io/v1
|
||||
kind: PodSecurityConfiguration
|
||||
defaults:
|
||||
enforce: "baseline"
|
||||
enforce-version: "latest"
|
||||
audit: "restricted"
|
||||
audit-version: "latest"
|
||||
warn: "restricted"
|
||||
warn-version: "latest"
|
||||
exemptions:
|
||||
namespaces:
|
||||
- kube-system
|
||||
- kubearmor
|
||||
- kyverno
|
||||
- cilium-system
|
||||
usernames: []
|
||||
runtimeClasses: []
|
||||
EOF
|
||||
|
||||
# --- kubeadm config ---
|
||||
echo "Création de la configuration kubeadm..."
|
||||
|
||||
cat > /tmp/kubeadm-config.yaml <<EOF
|
||||
apiVersion: kubeadm.k8s.io/v1beta4
|
||||
kind: InitConfiguration
|
||||
localAPIEndpoint:
|
||||
advertiseAddress: ${APISERVER_IP}
|
||||
bindPort: 6443
|
||||
nodeRegistration:
|
||||
criSocket: unix:///run/containerd/containerd.sock
|
||||
---
|
||||
apiVersion: kubeadm.k8s.io/v1beta4
|
||||
kind: ClusterConfiguration
|
||||
kubernetesVersion: v1.34.0
|
||||
networking:
|
||||
podSubnet: "${POD_CIDR}"
|
||||
serviceSubnet: "${SERVICE_CIDR}"
|
||||
dnsDomain: "cluster.local"
|
||||
apiServer:
|
||||
extraArgs:
|
||||
- name: audit-log-path
|
||||
value: /var/log/kubernetes/audit/audit.log
|
||||
- name: audit-policy-file
|
||||
value: /etc/kubernetes/audit/audit-policy.yaml
|
||||
- name: audit-log-maxage
|
||||
value: "30"
|
||||
- name: audit-log-maxbackup
|
||||
value: "10"
|
||||
- name: audit-log-maxsize
|
||||
value: "100"
|
||||
- name: encryption-provider-config
|
||||
value: /etc/kubernetes/encryption/encryption-config.yaml
|
||||
- name: enable-admission-plugins
|
||||
value: NodeRestriction,PodSecurity,EventRateLimit
|
||||
- name: admission-control-config-file
|
||||
value: /etc/kubernetes/admission/admission-config.yaml
|
||||
- name: anonymous-auth
|
||||
value: "false"
|
||||
- name: tls-min-version
|
||||
value: VersionTLS12
|
||||
- name: tls-cipher-suites
|
||||
value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||
- name: request-timeout
|
||||
value: "300s"
|
||||
extraVolumes:
|
||||
- name: audit-log
|
||||
hostPath: /var/log/kubernetes/audit
|
||||
mountPath: /var/log/kubernetes/audit
|
||||
pathType: DirectoryOrCreate
|
||||
- name: audit-policy
|
||||
hostPath: /etc/kubernetes/audit/audit-policy.yaml
|
||||
mountPath: /etc/kubernetes/audit/audit-policy.yaml
|
||||
readOnly: true
|
||||
pathType: File
|
||||
- name: encryption-config
|
||||
hostPath: /etc/kubernetes/encryption/encryption-config.yaml
|
||||
mountPath: /etc/kubernetes/encryption/encryption-config.yaml
|
||||
readOnly: true
|
||||
pathType: File
|
||||
- name: admission-config
|
||||
hostPath: /etc/kubernetes/admission/admission-config.yaml
|
||||
mountPath: /etc/kubernetes/admission/admission-config.yaml
|
||||
readOnly: true
|
||||
pathType: File
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
- name: terminated-pod-gc-threshold
|
||||
value: "50"
|
||||
- name: use-service-account-credentials
|
||||
value: "true"
|
||||
---
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
kind: KubeletConfiguration
|
||||
cgroupDriver: systemd
|
||||
protectKernelDefaults: true
|
||||
readOnlyPort: 0
|
||||
EOF
|
||||
|
||||
# --- Initialisation du cluster ---
|
||||
echo ""
|
||||
echo "Initialisation de kubeadm..."
|
||||
echo " Paramètres : audit logs + etcd chiffré AES + admission controllers"
|
||||
echo " --skip-phases=addon/kube-proxy : Cilium remplacera kube-proxy (eBPF natif)"
|
||||
echo ""
|
||||
|
||||
sudo kubeadm init \
|
||||
--config=/tmp/kubeadm-config.yaml \
|
||||
--skip-phases=addon/kube-proxy
|
||||
|
||||
# Configuration kubectl
|
||||
echo ""
|
||||
echo "Configuration de kubectl pour l'utilisateur courant..."
|
||||
mkdir -p $HOME/.kube
|
||||
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
|
||||
sudo chown $(id -u):$(id -g) $HOME/.kube/config
|
||||
mkdir -p "$HOME/.kube"
|
||||
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
|
||||
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
|
||||
chmod 600 "$HOME/.kube/config"
|
||||
|
||||
# Nettoyer les fichiers temporaires sensibles
|
||||
rm -f /tmp/kubeadm-config.yaml
|
||||
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════════════════════════╗"
|
||||
echo "║ AVERTISSEMENT SÉCURITÉ ║"
|
||||
echo "║ admin.conf = system:masters = bypass RBAC complet ║"
|
||||
echo "║ Usage : bootstrap initial uniquement ║"
|
||||
echo "║ Utiliser 08-generate-restricted-kubeconfig.sh pour ║"
|
||||
echo "║ générer un accès limité pour l'équipe externe ║"
|
||||
echo "╚══════════════════════════════════════════════════════════════╝"
|
||||
echo ""
|
||||
echo "✓ Control plane initialisé avec succès!"
|
||||
echo ""
|
||||
echo "Pour joindre des workers au cluster, récupérez la commande 'kubeadm join' ci-dessus"
|
||||
echo "ou régénérez-la avec: kubeadm token create --print-join-command"
|
||||
echo "Sécurité activée:"
|
||||
echo " ✓ Audit logs → /var/log/kubernetes/audit/audit.log"
|
||||
echo " ✓ etcd chiffré → AES-CBC 256 bits at-rest"
|
||||
echo " ✓ Admission → NodeRestriction + PodSecurity + EventRateLimit"
|
||||
echo " ✓ Auth anonyme → désactivée"
|
||||
echo " ✓ TLS min → 1.2"
|
||||
echo ""
|
||||
echo "Statut des composants du control plane:"
|
||||
echo "Prochaines étapes:"
|
||||
echo " 1. Joindre les workers : 03-join-workers.sh"
|
||||
echo " 2. Installer Cilium : 04-install-cilium.sh"
|
||||
echo " (commande join à récupérer ci-dessus ou via: kubeadm token create --print-join-command)"
|
||||
echo ""
|
||||
echo "Statut du cluster:"
|
||||
kubectl get nodes
|
||||
kubectl get pods -n kube-system
|
||||
|
||||
@@ -6,16 +6,45 @@ set -e
|
||||
|
||||
echo "=== Jonction d'un worker au cluster Kubernetes ==="
|
||||
echo ""
|
||||
echo "ATTENTION: Ce script nécessite la commande 'kubeadm join' générée par le master"
|
||||
echo ""
|
||||
echo "Si vous n'avez pas la commande, exécutez sur le master:"
|
||||
echo "Récupérer les informations sur le master avec:"
|
||||
echo " kubeadm token create --print-join-command"
|
||||
echo ""
|
||||
read -p "Entrez la commande kubeadm join complète: " JOIN_COMMAND
|
||||
|
||||
read -p "IP du master (ex: 192.168.1.10): " MASTER_IP
|
||||
read -p "Token (format: xxxxxx.yyyyyyyyyyyyyyyy): " TOKEN
|
||||
read -s -p "CA cert hash (format: sha256:<64 hex>): " CA_HASH
|
||||
echo ""
|
||||
|
||||
# Validation des formats avant toute exécution
|
||||
# POURQUOI: Passer une commande non validée à sudo permet l'exécution de commandes
|
||||
# arbitraires en root. On reconstruit l'appel kubeadm avec des arguments
|
||||
# explicites pour éviter toute injection.
|
||||
|
||||
if [[ ! "$MASTER_IP" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$|^[a-zA-Z0-9._-]+$ ]]; then
|
||||
echo "Erreur: format IP/hostname master invalide"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! "$TOKEN" =~ ^[a-z0-9]{6}\.[a-z0-9]{16}$ ]]; then
|
||||
echo "Erreur: format token invalide (attendu: xxxxxx.yyyyyyyyyyyyyyyy)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! "$CA_HASH" =~ ^sha256:[a-f0-9]{64}$ ]]; then
|
||||
echo "Erreur: format CA hash invalide (attendu: sha256:<64 caractères hex>)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Exécution de: $JOIN_COMMAND"
|
||||
sudo $JOIN_COMMAND
|
||||
echo "Composants validés:"
|
||||
echo " Master : ${MASTER_IP}:6443"
|
||||
echo " Token : ${TOKEN:0:6}.****************"
|
||||
echo " CA Hash : sha256:${CA_HASH:7:8}..."
|
||||
echo ""
|
||||
|
||||
sudo kubeadm join "${MASTER_IP}:6443" \
|
||||
--token "$TOKEN" \
|
||||
--discovery-token-ca-cert-hash "$CA_HASH"
|
||||
|
||||
echo ""
|
||||
echo "✓ Worker joint au cluster avec succès!"
|
||||
|
||||
176
partie-01-installation/04-install-cilium.sh
Executable file
176
partie-01-installation/04-install-cilium.sh
Executable file
@@ -0,0 +1,176 @@
|
||||
#!/bin/bash
|
||||
# Partie 1 - Installation du CNI Cilium + Hubble + Tetragon
|
||||
# À exécuter sur le nœud MASTER
|
||||
# Remplace Flannel : Cilium apporte NetworkPolicy L7, chiffrement WireGuard,
|
||||
# observabilité Hubble et runtime security Tetragon.
|
||||
|
||||
set -e
|
||||
|
||||
CILIUM_VERSION="1.17.3"
|
||||
CILIUM_CLI_VERSION="v0.18.4"
|
||||
TETRAGON_VERSION="1.4.0"
|
||||
|
||||
APISERVER_IP=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | grep -oP '(?<=https://)[^:]+')
|
||||
|
||||
echo "=== Installation Cilium ${CILIUM_VERSION} + Hubble + Tetragon ${TETRAGON_VERSION} ==="
|
||||
echo "API server : ${APISERVER_IP}:6443"
|
||||
echo ""
|
||||
|
||||
# --- Cilium CLI ---
|
||||
echo "Installation du CLI Cilium ${CILIUM_CLI_VERSION}..."
|
||||
CILIUM_CLI_ARCH="amd64"
|
||||
CILIUM_CLI_TAR="cilium-linux-${CILIUM_CLI_ARCH}.tar.gz"
|
||||
CILIUM_CLI_URL="https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/${CILIUM_CLI_TAR}"
|
||||
CILIUM_CLI_SHA_URL="${CILIUM_CLI_URL}.sha256sum"
|
||||
|
||||
curl -L --fail "$CILIUM_CLI_URL" -o "/tmp/${CILIUM_CLI_TAR}"
|
||||
curl -L --fail "$CILIUM_CLI_SHA_URL" -o "/tmp/${CILIUM_CLI_TAR}.sha256sum"
|
||||
|
||||
cd /tmp
|
||||
sha256sum --check "${CILIUM_CLI_TAR}.sha256sum" || {
|
||||
echo "ERREUR: checksum cilium-cli invalide. Abandon."
|
||||
exit 1
|
||||
}
|
||||
cd -
|
||||
|
||||
tar -xzf "/tmp/${CILIUM_CLI_TAR}" -C /tmp cilium
|
||||
sudo mv /tmp/cilium /usr/local/bin/cilium
|
||||
sudo chmod +x /usr/local/bin/cilium
|
||||
rm -f "/tmp/${CILIUM_CLI_TAR}" "/tmp/${CILIUM_CLI_TAR}.sha256sum"
|
||||
echo " ✓ cilium CLI installé"
|
||||
|
||||
# --- Helm repos ---
|
||||
echo "Ajout des repos Helm..."
|
||||
helm repo add cilium https://helm.cilium.io/ 2>/dev/null || helm repo update cilium
|
||||
helm repo update
|
||||
|
||||
# --- Cilium ---
|
||||
echo ""
|
||||
echo "Installation de Cilium ${CILIUM_VERSION}..."
|
||||
echo " Options : kubeProxyReplacement + WireGuard + Hubble + policyEnforcementMode=default"
|
||||
echo ""
|
||||
|
||||
helm upgrade --install cilium cilium/cilium \
|
||||
--version "${CILIUM_VERSION}" \
|
||||
--namespace kube-system \
|
||||
--set kubeProxyReplacement=true \
|
||||
--set k8sServiceHost="${APISERVER_IP}" \
|
||||
--set k8sServicePort=6443 \
|
||||
--set encryption.enabled=true \
|
||||
--set encryption.type=wireguard \
|
||||
--set hubble.enabled=true \
|
||||
--set hubble.relay.enabled=true \
|
||||
--set hubble.ui.enabled=true \
|
||||
--set policyEnforcementMode=default \
|
||||
--set nodeinit.enabled=true \
|
||||
--set ipam.mode=kubernetes \
|
||||
--wait --timeout=10m
|
||||
|
||||
echo ""
|
||||
echo "Attente que Cilium soit opérationnel..."
|
||||
cilium status --wait --wait-duration=5m
|
||||
|
||||
echo ""
|
||||
echo "✓ Cilium opérationnel"
|
||||
|
||||
# --- Tetragon ---
|
||||
echo ""
|
||||
echo "Installation de Tetragon ${TETRAGON_VERSION}..."
|
||||
echo " Tetragon = observabilité runtime eBPF profonde (syscalls, fichiers, réseau)"
|
||||
|
||||
helm upgrade --install tetragon cilium/tetragon \
|
||||
--version "${TETRAGON_VERSION}" \
|
||||
--namespace kube-system \
|
||||
--set tetragon.exportFilename="/var/log/tetragon/tetragon.log" \
|
||||
--wait --timeout=5m
|
||||
|
||||
echo ""
|
||||
echo "Application des TracingPolicies de base..."
|
||||
|
||||
# Surveiller toutes les exécutions de processus (détecte les shells lancés dans des containers,
|
||||
# les outils de reconnaissance, les tentatives d'escalade)
|
||||
kubectl apply -f - <<'EOF'
|
||||
apiVersion: cilium.io/v1alpha1
|
||||
kind: TracingPolicy
|
||||
metadata:
|
||||
name: monitor-process-exec
|
||||
spec:
|
||||
kprobes:
|
||||
- call: "sys_execve"
|
||||
syscall: true
|
||||
args:
|
||||
- index: 0
|
||||
type: "string"
|
||||
- index: 1
|
||||
type: "string_array"
|
||||
EOF
|
||||
|
||||
# Surveiller les accès aux fichiers sensibles du cluster et de l'hôte
|
||||
kubectl apply -f - <<'EOF'
|
||||
apiVersion: cilium.io/v1alpha1
|
||||
kind: TracingPolicy
|
||||
metadata:
|
||||
name: monitor-sensitive-file-access
|
||||
spec:
|
||||
kprobes:
|
||||
- call: "sys_openat"
|
||||
syscall: true
|
||||
args:
|
||||
- index: 1
|
||||
type: "string"
|
||||
selectors:
|
||||
- matchArgs:
|
||||
- index: 1
|
||||
operator: "Prefix"
|
||||
values:
|
||||
- "/etc/kubernetes"
|
||||
- "/var/lib/etcd"
|
||||
- "/run/secrets/kubernetes.io"
|
||||
- "/proc/1/"
|
||||
EOF
|
||||
|
||||
echo " ✓ TracingPolicies appliquées"
|
||||
|
||||
# --- NetworkPolicy deny-all par défaut ---
|
||||
echo ""
|
||||
echo "Application de la NetworkPolicy deny-all dans le namespace default..."
|
||||
|
||||
kubectl apply -f - <<'EOF'
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-deny-all
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
EOF
|
||||
|
||||
echo " ✓ NetworkPolicy deny-all appliquée (namespace: default)"
|
||||
|
||||
# --- Vérifications ---
|
||||
echo ""
|
||||
echo "=== Vérifications ==="
|
||||
echo ""
|
||||
echo "1. Statut Cilium:"
|
||||
cilium status
|
||||
echo ""
|
||||
echo "2. Pods Tetragon:"
|
||||
kubectl get pods -n kube-system -l app.kubernetes.io/name=tetragon
|
||||
echo ""
|
||||
echo "3. TracingPolicies:"
|
||||
kubectl get tracingpolicies 2>/dev/null || echo " (CRD TracingPolicy en cours d'initialisation)"
|
||||
echo ""
|
||||
echo "4. Nœuds (doivent être Ready):"
|
||||
kubectl get nodes
|
||||
echo ""
|
||||
echo "✓ Cilium + Hubble + Tetragon installés avec succès!"
|
||||
echo ""
|
||||
echo "Accès à Hubble UI (depuis le master) :"
|
||||
echo " kubectl port-forward -n kube-system svc/hubble-ui 8080:80 &"
|
||||
echo " Puis ouvrir http://localhost:8080"
|
||||
echo ""
|
||||
echo "Logs Tetragon en temps réel :"
|
||||
echo " kubectl logs -n kube-system -l app.kubernetes.io/name=tetragon -f | jq '.'"
|
||||
@@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Partie 1 - Installation du CNI Flannel
|
||||
# À exécuter sur le nœud MASTER
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Installation du CNI Flannel ==="
|
||||
|
||||
# Télécharger et appliquer le manifest Flannel
|
||||
echo "Application du manifest Flannel..."
|
||||
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
|
||||
|
||||
echo ""
|
||||
echo "Attente du déploiement de Flannel..."
|
||||
kubectl wait --for=condition=ready pod -l app=flannel -n kube-flannel --timeout=180s
|
||||
|
||||
echo ""
|
||||
echo "✓ Flannel installé avec succès!"
|
||||
echo ""
|
||||
echo "Vérification des pods réseau:"
|
||||
kubectl get pods -n kube-flannel
|
||||
echo ""
|
||||
echo "Vérification des nœuds (tous doivent être Ready):"
|
||||
kubectl get nodes
|
||||
174
partie-01-installation/05-install-kubearmor.sh
Executable file
174
partie-01-installation/05-install-kubearmor.sh
Executable file
@@ -0,0 +1,174 @@
|
||||
#!/bin/bash
# Part 1 - Install KubeArmor.
# Run on the MASTER node.
#
# KubeArmor = DECLARATIVE runtime policies (allow/block files, processes, network).
# Complement to Tetragon: KubeArmor BLOCKS, Tetragon TRACES in depth.
# The two are independent and complementary:
#   - blocked attack    -> KubeArmor (immediate action)
#   - forensic analysis -> Tetragon (detailed syscall logs)

set -e

KUBEARMOR_VERSION="1.4.3"

echo "=== Installation KubeArmor ${KUBEARMOR_VERSION} ==="
echo ""

# --- Install via Helm ---
echo "Ajout du repo Helm KubeArmor..."
# "repo add" fails if the repo already exists; that is fine on re-runs.
helm repo add kubearmor https://kubearmor.github.io/charts 2>/dev/null || true
helm repo update

echo ""
echo "Installation de KubeArmor ${KUBEARMOR_VERSION}..."
# Default postures: files blocked, capabilities and network audited only.
helm upgrade --install kubearmor kubearmor/kubearmor \
  --namespace kubearmor \
  --create-namespace \
  --version "${KUBEARMOR_VERSION}" \
  --set kubearmor.defaultFilePosture=block \
  --set kubearmor.defaultCapabilitiesPosture=audit \
  --set kubearmor.defaultNetworkPosture=audit \
  --wait --timeout=5m

echo ""
echo "Attente que KubeArmor soit opérationnel..."
kubectl wait --for=condition=ready pod \
  -l app.kubernetes.io/name=kubearmor \
  -n kubearmor \
  --timeout=120s

echo " ✓ KubeArmor opérationnel"
# --- Baseline ClusterKubeArmorPolicies ---
# These policies apply to EVERY pod in the cluster.
# Audit mode logs violations without blocking — switch to Block after validation.
# Block mode stops the violation and raises an alert.
# NOTE(review): upstream KubeArmor names its cluster-wide CRD
# "KubeArmorClusterPolicy" (plural "kubearmorclusterpolicies") — confirm the
# kind below against `kubectl api-resources --api-group=security.kubearmor.com`.

echo ""
echo "Application des ClusterKubeArmorPolicies de base..."

# Policy 1: block shell execution inside containers.
# WHY: an attacker who gets code execution in a container will immediately try
# to spawn a shell to explore the environment. Blocking /bin/sh, /bin/bash, etc.
# cuts that vector.
kubectl apply -f - <<'EOF'
apiVersion: security.kubearmor.com/v1
kind: ClusterKubeArmorPolicy
metadata:
  name: block-shell-execution
  annotations:
    description: "Bloque l'exécution de shells dans tous les containers"
spec:
  selector:
    matchLabels: {}
  process:
    matchPaths:
      - path: /bin/sh
      - path: /bin/bash
      - path: /usr/bin/sh
      - path: /usr/bin/bash
      - path: /bin/dash
      - path: /bin/zsh
      - path: /usr/bin/zsh
  action: Block
EOF

# Policy 2: block access to sensitive host paths from containers.
# WHY: /proc/1/ exposes the init process environment (often tokens).
# /etc/kubernetes/ holds the cluster certificates and kubeconfigs.
# /var/lib/etcd/ holds etcd data (even if encrypted, reading it is suspicious).
kubectl apply -f - <<'EOF'
apiVersion: security.kubearmor.com/v1
kind: ClusterKubeArmorPolicy
metadata:
  name: block-sensitive-host-paths
  annotations:
    description: "Bloque l'accès aux paths sensibles de l'hôte depuis les containers"
spec:
  selector:
    matchLabels: {}
  file:
    matchDirectories:
      - dir: /proc/1/
        recursive: false
      - dir: /etc/kubernetes/
        recursive: true
      - dir: /var/lib/etcd/
        recursive: true
      - dir: /run/secrets/kubernetes.io/
        recursive: true
  action: Block
EOF

# Policy 3: audit network reconnaissance tools.
# WHY: curl, wget, nc are legitimate in some apps but are also the first tools
# used for data exfiltration or C2 communication.
# Start in Audit to identify legitimate usage before blocking.
kubectl apply -f - <<'EOF'
apiVersion: security.kubearmor.com/v1
kind: ClusterKubeArmorPolicy
metadata:
  name: audit-network-recon-tools
  annotations:
    description: "Audit (puis potentiellement Block) des outils réseau dans les containers"
spec:
  selector:
    matchLabels: {}
  process:
    matchPaths:
      - path: /usr/bin/curl
      - path: /usr/bin/wget
      - path: /bin/nc
      - path: /usr/bin/nc
      - path: /usr/bin/ncat
      - path: /usr/bin/nmap
      - path: /usr/bin/tcpdump
  action: Audit
EOF

# Policy 4: block capability/namespace manipulation tools.
# WHY: nsenter, unshare, capsh are used for container escapes — an attacker
# inside a container may try to join the host namespaces.
kubectl apply -f - <<'EOF'
apiVersion: security.kubearmor.com/v1
kind: ClusterKubeArmorPolicy
metadata:
  name: block-namespace-escape-tools
  annotations:
    description: "Bloque les outils d'évasion de containers (nsenter, unshare, capsh)"
spec:
  selector:
    matchLabels: {}
  process:
    matchPaths:
      - path: /usr/bin/nsenter
      - path: /usr/sbin/nsenter
      - path: /usr/bin/unshare
      - path: /usr/bin/capsh
  action: Block
EOF

echo " ✓ 4 ClusterKubeArmorPolicies appliquées"
# --- Checks ---
echo ""
echo "=== Vérifications ==="
echo ""
echo "1. Pods KubeArmor:"
kubectl get pods -n kubearmor
echo ""
echo "2. ClusterKubeArmorPolicies actives:"
# BUGFIX: previous revision queried "clusterubearmorpolicies" and
# "clusterkubearmorpholicies" — both typos. The plural matching the kind
# applied above is "clusterkubearmorpolicies"; upstream KubeArmor also ships
# "kubearmorclusterpolicies" (TODO confirm with `kubectl api-resources`),
# so try both before giving up.
kubectl get clusterkubearmorpolicies 2>/dev/null || \
  kubectl get kubearmorclusterpolicies 2>/dev/null || \
  echo " (CRDs en cours d'initialisation — vérifier dans 30s)"
echo ""
echo "✓ KubeArmor installé avec succès!"
echo ""
echo "Logs KubeArmor en temps réel :"
echo " kubectl logs -n kubearmor -l app=kubearmor -f"
echo ""
echo "NOTE: La policy 'audit-network-recon-tools' est en mode Audit."
echo " Après validation des apps légitimes, passer en Block :"
# BUGFIX: resource name was misspelled "clusterkubearmorpholicy" in the hint.
echo " kubectl patch clusterkubearmorpolicy audit-network-recon-tools --type=merge -p '{\"spec\":{\"action\":\"Block\"}}'"
@@ -1,37 +0,0 @@
|
||||
#!/bin/bash
# Part 1 - Cluster verification.
# Run on the MASTER node.

set -e

echo "=== Vérification du cluster Kubernetes ==="
echo ""

echo "1. Vérification des nœuds:"
kubectl get nodes -o wide
echo ""

echo "2. Vérification des composants du control plane:"
kubectl get pods -n kube-system
echo ""

echo "3. Vérification du CNI Flannel:"
kubectl get pods -n kube-flannel
echo ""

echo "4. Test de déploiement d'un pod simple:"
# BUGFIX: --tty fails when the script runs without a terminal attached
# (CI, ssh batch mode). --rm only needs attach (-i) to stream output and
# delete the pod afterwards.
kubectl run test-pod --image=nginx --restart=Never --rm -i -- echo "✓ Connectivité pod fonctionnelle"
echo ""

echo "5. Vérification de la communication inter-pods:"
kubectl create deployment nginx-test --image=nginx --replicas=3
kubectl wait --for=condition=available deployment/nginx-test --timeout=60s
echo "✓ Déploiement réussi"
kubectl get pods -o wide -l app=nginx-test
echo ""

echo "Nettoyage du test..."
kubectl delete deployment nginx-test

echo ""
echo "=== Cluster vérifié avec succès! ==="
340
partie-01-installation/06-install-kyverno.sh
Executable file
340
partie-01-installation/06-install-kyverno.sh
Executable file
@@ -0,0 +1,340 @@
|
||||
#!/bin/bash
# Part 1 - Install Kyverno + admission policies.
# Run on the MASTER node.
#
# Kyverno = declarative admission controller (YAML policies, intercepts the API server).
# Acts BEFORE the pod starts (vs KubeArmor which acts at runtime).
# The two layers are complementary:
#   Kyverno   -> blocks the creation of dangerous workloads
#   KubeArmor -> blocks dangerous actions inside running workloads

set -e

KYVERNO_VERSION="3.4.1"

echo "=== Installation Kyverno ${KYVERNO_VERSION} ==="
echo ""

# --- Install via Helm ---
echo "Ajout du repo Helm Kyverno..."
# "repo add" fails if the repo already exists; that is fine on re-runs.
helm repo add kyverno https://kyverno.github.io/kyverno/ 2>/dev/null || true
helm repo update

echo ""
echo "Installation de Kyverno ${KYVERNO_VERSION}..."
helm upgrade --install kyverno kyverno/kyverno \
  --namespace kyverno \
  --create-namespace \
  --version "${KYVERNO_VERSION}" \
  --set replicaCount=1 \
  --set features.policyExceptions.enabled=true \
  --wait --timeout=5m

echo ""
echo "Attente que Kyverno soit opérationnel..."
kubectl wait --for=condition=ready pod \
  -l app.kubernetes.io/name=kyverno \
  -n kyverno \
  --timeout=120s
echo " ✓ Kyverno opérationnel"

echo ""
echo "Application des 8 ClusterPolicies de sécurité..."
# Policy 1: forbid privileged containers.
# WHY: a privileged container has full access to the host kernel (like root on
# the host). This is the most common escalation vector — a privileged container
# can read /dev/kmem, load kernel modules, rewrite the host's iptables.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-privileged-containers
  annotations:
    policies.kyverno.io/description: "Interdit les containers privileged."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: deny-privileged
      match:
        any:
          - resources:
              kinds: [Pod]
      validate:
        message: "Les containers privileged sont interdits."
        pattern:
          spec:
            containers:
              - =(securityContext):
                  =(privileged): "false"
            =(initContainers):
              - =(securityContext):
                  =(privileged): "false"
EOF

# Policy 2: forbid hostNetwork, hostPID, hostIPC.
# WHY:
#   hostNetwork: direct access to the host's network interfaces (full CNI bypass)
#   hostPID:     access to every host process (memory reads via /proc/<pid>/mem)
#   hostIPC:     access to the host's shared-memory segments
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-host-namespaces
  annotations:
    policies.kyverno.io/description: "Interdit hostNetwork, hostPID, hostIPC."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: deny-host-namespaces
      match:
        any:
          - resources:
              kinds: [Pod]
      validate:
        message: "hostNetwork, hostPID et hostIPC sont interdits."
        pattern:
          spec:
            =(hostNetwork): "false"
            =(hostPID): "false"
            =(hostIPC): "false"
EOF

# Policy 3: forbid hostPath volumes.
# WHY: hostPath mounts a host directory into the container. Classic vector:
# mount /etc to edit sudoers, /var/lib/kubelet to read ServiceAccount tokens,
# or / for full access to the host filesystem.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-hostpath-volumes
  annotations:
    policies.kyverno.io/description: "Interdit les volumes hostPath."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: deny-hostpath
      match:
        any:
          - resources:
              kinds: [Pod]
      validate:
        message: "Les volumes hostPath sont interdits."
        deny:
          conditions:
            any:
              - key: "{{ request.object.spec.volumes[].hostPath | length(@) }}"
                operator: GreaterThan
                value: 0
EOF
# Policy 4: require resource limits.
# WHY: without limits a compromised pod can consume all of a node's CPU/RAM
# and starve the other pods (internal DoS).
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-resource-limits
  annotations:
    policies.kyverno.io/description: "Oblige à définir des limits CPU et mémoire."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: require-limits
      match:
        any:
          - resources:
              kinds: [Pod]
              namespaces:
                - "external-app"
      validate:
        message: "Les limits CPU et mémoire sont obligatoires."
        pattern:
          spec:
            containers:
              - resources:
                  limits:
                    cpu: "?*"
                    memory: "?*"
EOF

# Policy 5: require runAsNonRoot.
# WHY: a container running as root (UID 0) can, after a breakout, operate as
# root on the host if kernel protections (SELinux, seccomp) are bypassed.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-run-as-non-root
  annotations:
    policies.kyverno.io/description: "Oblige les containers à tourner en non-root."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: require-non-root
      match:
        any:
          - resources:
              kinds: [Pod]
              namespaces:
                - "external-app"
      validate:
        message: "Les containers doivent tourner en non-root (runAsNonRoot: true)."
        pattern:
          spec:
            =(securityContext):
              =(runAsNonRoot): true
            containers:
              - =(securityContext):
                  =(runAsNonRoot): true
EOF
# Policy 6: require readOnlyRootFilesystem.
# WHY: a read-only root FS prevents the attacker from writing binaries,
# modifying startup scripts, or persisting inside the container.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-readonly-rootfs
  annotations:
    policies.kyverno.io/description: "Oblige le filesystem racine des containers à être en lecture seule."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: require-readonly-fs
      match:
        any:
          - resources:
              kinds: [Pod]
              namespaces:
                - "external-app"
      validate:
        message: "readOnlyRootFilesystem doit être true."
        pattern:
          spec:
            containers:
              - securityContext:
                  readOnlyRootFilesystem: true
EOF

# Policy 7: forbid the :latest image tag.
# WHY: :latest is non-reproducible and can change silently; it also enables
# injecting a malicious image into the registry. Forcing immutable tags
# (sha256 digest or semantic version) guarantees the deployed code is the
# code that was validated.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-latest-tag
  annotations:
    policies.kyverno.io/description: "Interdit l'utilisation du tag :latest."
spec:
  validationFailureAction: Enforce
  background: true
  rules:
    - name: deny-latest
      match:
        any:
          - resources:
              kinds: [Pod]
      validate:
        message: "Le tag ':latest' est interdit. Utiliser un tag de version ou un digest sha256."
        foreach:
          - list: "request.object.spec.containers"
            deny:
              conditions:
                any:
                  # Explicit ":latest" suffix (Equals supports wildcards).
                  - key: "{{ element.image }}"
                    operator: Equals
                    value: "*:latest"
                  # No tag at all (image does not match "*:*") — implies :latest.
                  - key: "{{ element.image }}"
                    operator: NotIn
                    value: ["*:*"]
EOF
# Policy 8: auto-generate a deny-all NetworkPolicy in every new namespace.
# WHY: a namespace without a NetworkPolicy is an open network — all pods can
# talk to each other freely, including cross-namespace. Kyverno generates the
# policy at namespace-creation time, guaranteeing no namespace is created
# without network isolation.
kubectl apply -f - <<'EOF'
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: generate-default-deny-networkpolicy
  annotations:
    policies.kyverno.io/description: "Génère une NetworkPolicy deny-all dans chaque nouveau namespace."
spec:
  rules:
    - name: generate-deny-all
      match:
        any:
          - resources:
              kinds: [Namespace]
      exclude:
        any:
          - resources:
              names:
                - kube-system
                - kube-public
                - kube-node-lease
                - kubearmor
                - kyverno
                - cilium-system
      generate:
        apiVersion: networking.k8s.io/v1
        kind: NetworkPolicy
        name: default-deny-all
        namespace: "{{request.object.metadata.name}}"
        synchronize: true
        data:
          spec:
            podSelector: {}
            policyTypes:
              - Ingress
              - Egress
EOF
echo " ✓ 8 ClusterPolicies appliquées"
|
||||
|
||||
# --- Vérifications ---
|
||||
echo ""
|
||||
echo "=== Vérifications ==="
|
||||
echo ""
|
||||
echo "1. Pods Kyverno:"
|
||||
kubectl get pods -n kyverno
|
||||
echo ""
|
||||
echo "2. ClusterPolicies actives:"
|
||||
kubectl get clusterpolicies
|
||||
echo ""
|
||||
echo "3. Test de la policy 1 (pod privileged doit être refusé):"
|
||||
if kubectl run kyverno-test-privileged \
|
||||
--image=nginx:1.25 \
|
||||
--restart=Never \
|
||||
--overrides='{"spec":{"containers":[{"name":"kyverno-test-privileged","image":"nginx:1.25","securityContext":{"privileged":true}}]}}' \
|
||||
--dry-run=server 2>&1 | grep -q "disallow-privileged"; then
|
||||
echo " ✓ Pod privileged correctement refusé par Kyverno"
|
||||
else
|
||||
kubectl run kyverno-test-privileged \
|
||||
--image=nginx:1.25 \
|
||||
--restart=Never \
|
||||
--overrides='{"spec":{"containers":[{"name":"kyverno-test-privileged","image":"nginx:1.25","securityContext":{"privileged":true}}]}}' \
|
||||
--dry-run=server 2>&1 || echo " ✓ Pod privileged refusé (vérifier le message ci-dessus)"
|
||||
fi
|
||||
echo ""
|
||||
echo "✓ Kyverno installé avec succès!"
|
||||
echo ""
|
||||
echo "Voir les violations Kyverno :"
|
||||
echo " kubectl get policyreport --all-namespaces"
|
||||
echo " kubectl get clusterpolicyreport"
|
||||
206
partie-01-installation/07-verify-cluster.sh
Executable file
206
partie-01-installation/07-verify-cluster.sh
Executable file
@@ -0,0 +1,206 @@
|
||||
#!/bin/bash
# Part 1 - Full verification of the hardened cluster.
# Run on the MASTER node.

set -e

PASS=0
FAIL=0

# Clean up test resources even when a check aborts the script.
CLEANUP_RESOURCES=()
cleanup() {
  if [[ ${#CLEANUP_RESOURCES[@]} -gt 0 ]]; then
    echo ""
    echo "--- Nettoyage des ressources de test ---"
    for res in "${CLEANUP_RESOURCES[@]}"; do
      # Entries embed flags (e.g. "pod/x -n default"), so word-splitting
      # is intentional here.
      # shellcheck disable=SC2086
      kubectl delete $res --ignore-not-found=true 2>/dev/null || true
    done
  fi
}
trap cleanup EXIT

# BUGFIX: ((PASS++)) evaluates to the PRE-increment value, so the very first
# call (PASS=0) exits with status 1 and kills the script under `set -e`.
# Plain arithmetic assignment always succeeds.
check_ok()   { echo " ✓ $1"; PASS=$((PASS + 1)); }
check_fail() { echo " ✗ $1"; FAIL=$((FAIL + 1)); }
|
||||
echo "=== Vérification du cluster Kubernetes sécurisé ==="
|
||||
echo ""
|
||||
|
||||
# --- 1. Nœuds ---
|
||||
echo "1. Nœuds du cluster:"
|
||||
kubectl get nodes -o wide
|
||||
echo ""
|
||||
|
||||
# --- 2. Composants control plane ---
|
||||
echo "2. Composants control plane:"
|
||||
kubectl get pods -n kube-system
|
||||
echo ""
|
||||
|
||||
# --- 3. Cilium ---
|
||||
echo "3. Cilium:"
|
||||
if cilium status 2>/dev/null | grep -q "OK"; then
|
||||
check_ok "Cilium opérationnel"
|
||||
else
|
||||
check_fail "Cilium non opérationnel"
|
||||
fi
|
||||
|
||||
if kubectl get pods -n kube-system -l app.kubernetes.io/name=cilium 2>/dev/null | grep -q "Running"; then
|
||||
check_ok "Pods Cilium en état Running"
|
||||
else
|
||||
check_fail "Pods Cilium non Running"
|
||||
fi
|
||||
|
||||
if kubectl get pods -n kube-system -l app.kubernetes.io/name=hubble-relay 2>/dev/null | grep -q "Running"; then
|
||||
check_ok "Hubble relay opérationnel"
|
||||
else
|
||||
check_fail "Hubble relay non opérationnel"
|
||||
fi
|
||||
|
||||
if kubectl get pods -n kube-system -l app.kubernetes.io/name=tetragon 2>/dev/null | grep -q "Running"; then
|
||||
check_ok "Tetragon opérationnel"
|
||||
else
|
||||
check_fail "Tetragon non opérationnel"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# --- 4. KubeArmor ---
echo "4. KubeArmor:"
if kubectl get pods -n kubearmor 2>/dev/null | grep -q "Running"; then
  check_ok "Pods KubeArmor en état Running"
else
  check_fail "Pods KubeArmor non Running"
fi

# Count cluster-wide KubeArmor policies by data rows.
# BUGFIX 1: the plural was misspelled "clusterkubearmorpholicies"; try the
# spelling applied by 05-install-kubearmor.sh, then the upstream plural
# "kubearmorclusterpolicies" (TODO confirm with `kubectl api-resources`).
# BUGFIX 2: `grep -c … || echo "0"` printed "0" TWICE on no match
# (grep -c emits 0 AND exits 1), breaking the numeric comparison below.
KUBEARMOR_POLICIES=$(
  { kubectl get clusterkubearmorpolicies --no-headers 2>/dev/null \
    || kubectl get kubearmorclusterpolicies --no-headers 2>/dev/null; } | wc -l
)
KUBEARMOR_POLICIES=${KUBEARMOR_POLICIES:-0}
if [[ "$KUBEARMOR_POLICIES" -ge 4 ]]; then
  check_ok "ClusterKubeArmorPolicies actives: ${KUBEARMOR_POLICIES}"
else
  check_fail "ClusterKubeArmorPolicies insuffisantes (trouvées: ${KUBEARMOR_POLICIES}, attendues: 4)"
fi
echo ""

# --- 5. Kyverno ---
echo "5. Kyverno:"
if kubectl get pods -n kyverno 2>/dev/null | grep -q "Running"; then
  check_ok "Pods Kyverno en état Running"
else
  check_fail "Pods Kyverno non Running"
fi

# Count ClusterPolicies by data rows (header stripped) rather than grepping
# status words — the old grep chain could emit two numbers on failure.
KYVERNO_POLICIES=$(kubectl get clusterpolicies --no-headers 2>/dev/null | wc -l || true)
KYVERNO_POLICIES=${KYVERNO_POLICIES:-0}
if [[ "$KYVERNO_POLICIES" -ge 8 ]]; then
  check_ok "ClusterPolicies Kyverno actives: ${KYVERNO_POLICIES}"
else
  check_fail "ClusterPolicies Kyverno insuffisantes (trouvées: ${KYVERNO_POLICIES}, attendues: 8)"
fi
echo ""
|
||||
# --- 6. Audit logs ---
echo "6. Audit logging:"
AUDIT_LOG="/var/log/kubernetes/audit/audit.log"
# `sudo test -f` because the audit log is root-only.
if sudo test -f "$AUDIT_LOG" 2>/dev/null; then
  AUDIT_LINES=$(sudo wc -l < "$AUDIT_LOG" 2>/dev/null || echo "0")
  check_ok "Audit log actif ($AUDIT_LINES entrées): $AUDIT_LOG"
else
  check_fail "Audit log non trouvé: $AUDIT_LOG"
fi
echo ""

# --- 7. etcd encryption at rest ---
echo "7. Chiffrement etcd at-rest:"
# Create a throwaway secret, then read it straight from etcd: if encryption
# at rest is enabled, the stored value carries the "k8s:enc:aescbc" prefix.
kubectl create secret generic etcd-encryption-check \
  --from-literal=key=valeur-test-chiffrement \
  -n default \
  --dry-run=client -o yaml | kubectl apply -f - > /dev/null 2>&1
CLEANUP_RESOURCES+=("secret/etcd-encryption-check -n default")

ETCD_DATA=$(sudo ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  get /registry/secrets/default/etcd-encryption-check 2>/dev/null || echo "")

if grep -q "k8s:enc:aescbc" <<<"$ETCD_DATA"; then
  check_ok "Secrets chiffrés AES-CBC dans etcd"
elif [[ -z "$ETCD_DATA" ]]; then
  check_fail "Impossible de lire etcd (vérifier les certificats)"
else
  check_fail "Secrets NON chiffrés dans etcd (données en clair détectées)"
fi
echo ""
|
||||
# --- 8. NetworkPolicy deny-all test ---
echo "8. NetworkPolicy deny-all:"
kubectl run netpol-test-sender \
  --image=busybox:1.36 \
  --restart=Never \
  -- sleep 3600 2>/dev/null || true
CLEANUP_RESOURCES+=("pod/netpol-test-sender -n default")

kubectl run netpol-test-receiver \
  --image=busybox:1.36 \
  --restart=Never \
  -- sleep 3600 2>/dev/null || true
CLEANUP_RESOURCES+=("pod/netpol-test-receiver -n default")

kubectl wait --for=condition=ready pod/netpol-test-sender pod/netpol-test-receiver \
  --timeout=60s 2>/dev/null || true

RECEIVER_IP=$(kubectl get pod netpol-test-receiver -o jsonpath='{.status.podIP}' 2>/dev/null || echo "")
if [[ -n "$RECEIVER_IP" ]]; then
  # With deny-all in place the HTTP fetch must FAIL; a successful fetch
  # means the pods can still talk to each other.
  if kubectl exec netpol-test-sender -- \
    wget -qO- --timeout=3 "http://${RECEIVER_IP}" 2>/dev/null; then
    check_fail "NetworkPolicy deny-all NON effective (communication inter-pods possible)"
  else
    check_ok "NetworkPolicy deny-all effective (communication inter-pods bloquée)"
  fi
else
  check_fail "Impossible de récupérer l'IP du pod receiver"
fi
echo ""
|
||||
# --- 9. Kyverno test: privileged pod rejected ---
echo "9. Kyverno - rejet pod privileged:"
KYVERNO_REJECT_OUTPUT=$(kubectl run kyverno-priv-test \
  --image=nginx:1.25 \
  --restart=Never \
  --overrides='{"spec":{"containers":[{"name":"kyverno-priv-test","image":"nginx:1.25","securityContext":{"privileged":true}}]}}' \
  --dry-run=server 2>&1 || true)

if grep -qiE "disallow-privileged|admission webhook|denied" <<<"$KYVERNO_REJECT_OUTPUT"; then
  check_ok "Pod privileged correctement refusé par Kyverno"
else
  check_fail "Pod privileged NON refusé — vérifier la ClusterPolicy disallow-privileged-containers"
fi
echo ""

# --- 10. Legitimate deployment test ---
echo "10. Test déploiement légitime (sans restrictions):"
kubectl create deployment cluster-verify-test \
  --image=nginx:1.25 \
  --replicas=2 2>/dev/null || true
CLEANUP_RESOURCES+=("deployment/cluster-verify-test -n default")

# FIX: explicit if/else instead of `wait && check_ok || check_fail` — with
# the && || form, check_fail would also run if check_ok itself failed.
if kubectl wait --for=condition=available deployment/cluster-verify-test \
  --timeout=60s 2>/dev/null; then
  check_ok "Déploiement légitime nginx:1.25 fonctionnel"
else
  check_fail "Déploiement légitime échoué (vérifier les policies)"
fi
echo ""

# --- Summary ---
echo "============================================="
echo " RÉSUMÉ DES VÉRIFICATIONS"
echo "============================================="
echo " Succès : $PASS"
echo " Échecs : $FAIL"
echo "============================================="
echo ""
if [[ $FAIL -eq 0 ]]; then
  echo "✓ Cluster sécurisé opérationnel — toutes les vérifications passées!"
else
  echo "⚠ $FAIL vérification(s) échouée(s) — voir les détails ci-dessus."
  exit 1
fi
||||
225
partie-01-installation/08-generate-restricted-kubeconfig.sh
Executable file
225
partie-01-installation/08-generate-restricted-kubeconfig.sh
Executable file
@@ -0,0 +1,225 @@
|
||||
#!/bin/bash
# Partie 1 - Generate a restricted kubeconfig for the external team.
# Run this on the MASTER node.
#
# The generated kubeconfig lets the external team:
#   ✓ deploy an application in the "external-app" namespace
#   ✓ manage Deployments, Services, Pods, ConfigMaps in that namespace
#   ✗ access other namespaces
#   ✗ create ClusterRoles or ClusterRoleBindings
#   ✗ run kubectl exec (pods/exec not granted)
#   ✗ read existing secrets (create/update only)

# Strict mode: abort on command failure, on use of unset variables, and on a
# failure anywhere in a pipeline (plain `set -e` misses pipeline failures,
# e.g. the `kubectl create ... | kubectl apply` steps below).
set -euo pipefail

# Configuration — readonly: nothing below ever reassigns these.
readonly NAMESPACE="external-app"
readonly SA_NAME="external-deployer"
readonly TOKEN_DURATION="24h"
readonly OUTPUT_FILE="./external-team-kubeconfig.yaml"

echo "=== Génération du kubeconfig restreint pour l'équipe externe ==="
echo ""
echo "Namespace : $NAMESPACE"
echo "Compte : $SA_NAME"
echo "Expiration : $TOKEN_DURATION"
echo "Fichier : $OUTPUT_FILE"
echo ""
# --- Namespace: idempotent creation + Pod Security Admission labels ---
echo "1. Création du namespace ${NAMESPACE}..."

# `create --dry-run=client | apply` makes the creation idempotent: re-running
# the script does not fail if the namespace already exists.
kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -

# Pod Security labels: enforce the "restricted" profile on everything the
# external team deploys; audit/warn mirror it for visibility.
pss_labels=(
    pod-security.kubernetes.io/enforce=restricted
    pod-security.kubernetes.io/enforce-version=latest
    pod-security.kubernetes.io/audit=restricted
    pod-security.kubernetes.io/warn=restricted
)
kubectl label namespace "$NAMESPACE" "${pss_labels[@]}" --overwrite

echo " ✓ Namespace créé avec PodSecurity=restricted"
# --- ServiceAccount: the identity the external team will authenticate as ---
echo ""
echo "2. Création du ServiceAccount ${SA_NAME}..."

# Same dry-run|apply idiom as the namespace step, for idempotency.
kubectl create serviceaccount "$SA_NAME" --namespace "$NAMESPACE" --dry-run=client -o yaml \
    | kubectl apply -f -

echo " ✓ ServiceAccount créé"
# --- Role: deliberately namespace-scoped (a Role, not a ClusterRole), so none
# of the grants below can ever reach outside ${NAMESPACE}. ---
echo ""
echo "3. Création du Role (namespace-scoped)..."

# The heredoc below is piped straight to `kubectl apply`; the YAML comments
# inside it document the security rationale and travel with the manifest.
kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: external-deployer-role
  namespace: ${NAMESPACE}
rules:
# Déploiement d'application
- apiGroups: ["apps"]
  resources: ["deployments", "replicasets", "statefulsets"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# Workloads de base
- apiGroups: [""]
  resources: ["pods", "services", "configmaps", "serviceaccounts"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# Secrets : créer/modifier uniquement — PAS de lecture des secrets existants
# POURQUOI: Interdire "get"/"list" sur les secrets empêche la lecture des
# ServiceAccount tokens ou credentials déjà présents dans le namespace.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create", "update", "patch"]
# Logs des pods (debug légitime)
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get"]
# NetworkPolicy : peut en créer pour exposer son app, mais ne peut pas supprimer la deny-all
- apiGroups: ["networking.k8s.io"]
  resources: ["networkpolicies"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
# Ingress si nécessaire
- apiGroups: ["networking.k8s.io"]
  resources: ["ingresses"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# Lecture des events (debug)
- apiGroups: [""]
  resources: ["events"]
  verbs: ["get", "list", "watch"]
# INTERDITS (pas listés = interdits par RBAC) :
# - pods/exec (pas de shell interactif dans les pods)
# - pods/portforward (pas de tunnel direct)
# - ClusterRole, ClusterRoleBinding (pas d'escalade cluster-wide)
# - nodes, persistentvolumes, namespaces (ressources cluster-scoped)
# - secrets "get"/"list" (pas de lecture de credentials existants)
# - verbes "escalate", "bind", "impersonate"
EOF

echo " ✓ Role créé"
# --- RoleBinding: attaches external-deployer-role to the ServiceAccount,
# within ${NAMESPACE} only. ---
echo ""
echo "4. Création du RoleBinding..."
kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: external-deployer-binding
  namespace: ${NAMESPACE}
subjects:
- kind: ServiceAccount
  name: ${SA_NAME}
  namespace: ${NAMESPACE}
roleRef:
  kind: Role
  name: external-deployer-role
  apiGroup: rbac.authorization.k8s.io
EOF
echo " ✓ RoleBinding créé"
# --- Kyverno policy: defense-in-depth against cluster-wide escalation by the
# external ServiceAccount. RBAC already withholds these permissions; this
# policy adds an explicit Enforce-mode deny at admission time. ---
# NOTE(review): `deny: {}` carries no conditions, which reads as "deny every
# request matched by this rule unconditionally" — confirm against the Kyverno
# version in use that an empty deny block behaves this way.
echo ""
echo "5. Kyverno : politique d'escalade pour ${SA_NAME}..."
kubectl apply -f - <<EOF
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-external-deployer-escalation
  annotations:
    policies.kyverno.io/description: "Interdit au SA ${SA_NAME} de créer des ressources cluster-scoped."
spec:
  validationFailureAction: Enforce
  rules:
  - name: deny-clusterrole-creation
    match:
      any:
      - resources:
          kinds: [ClusterRole, ClusterRoleBinding]
        subjects:
        - kind: ServiceAccount
          name: ${SA_NAME}
          namespace: ${NAMESPACE}
    validate:
      message: "Le compte ${SA_NAME} ne peut pas créer de ressources cluster-scoped."
      deny: {}
EOF
echo " ✓ Policy anti-escalade Kyverno créée"
# --- Short-lived bearer token for the ServiceAccount (TokenRequest API) ---
echo ""
echo "6. Génération du token (durée: ${TOKEN_DURATION})..."

# Under `set -e`, a failed token request aborts the script here rather than
# producing a kubeconfig with an empty token.
TOKEN=$(kubectl create token "$SA_NAME" --namespace "$NAMESPACE" --duration="$TOKEN_DURATION")

echo " ✓ Token généré (expire dans ${TOKEN_DURATION})"
# --- Build a standalone kubeconfig for the external team ---
echo ""
echo "7. Construction du kubeconfig..."

CLUSTER_NAME=$(kubectl config view --minify -o jsonpath='{.clusters[0].name}')
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
# --flatten inlines a file-based `certificate-authority:` entry into
# certificate-authority-data. Without it, an admin kubeconfig that references
# the CA by path yields an EMPTY value here and a silently broken output file.
CA_DATA=$(kubectl config view --minify --raw --flatten -o jsonpath='{.clusters[0].cluster.certificate-authority-data}')

# Fail loudly instead of emitting a kubeconfig that cannot verify the server.
if [[ -z "$CA_DATA" ]]; then
    echo "ERREUR: impossible d'extraire certificate-authority-data du kubeconfig courant" >&2
    exit 1
fi

cat > "$OUTPUT_FILE" <<EOF
apiVersion: v1
kind: Config
current-context: external-context
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: ${APISERVER}
    certificate-authority-data: ${CA_DATA}
contexts:
- name: external-context
  context:
    cluster: ${CLUSTER_NAME}
    namespace: ${NAMESPACE}
    user: ${SA_NAME}
users:
- name: ${SA_NAME}
  user:
    token: ${TOKEN}
EOF

# Owner-only permissions: the file embeds a live bearer token.
chmod 600 "$OUTPUT_FILE"
echo " ✓ Kubeconfig écrit: $OUTPUT_FILE (chmod 600)"
# --- Verify the effective permissions via `kubectl auth can-i` impersonation ---

# expect_allowed VERB RESOURCE — the SA should be allowed in $NAMESPACE.
expect_allowed() {
    if kubectl auth can-i "$1" "$2" --as="system:serviceaccount:${NAMESPACE}:${SA_NAME}" -n "$NAMESPACE"; then
        echo " ✓ $1 $2"
    else
        echo " ✗ $1 $2"
    fi
}

# expect_denied LABEL CAN-I-ARGS... — the SA should be denied.
expect_denied() {
    local label=$1
    shift
    if kubectl auth can-i "$@" --as="system:serviceaccount:${NAMESPACE}:${SA_NAME}"; then
        echo " ✗ PROBLÈME: ${label} autorisé"
    else
        echo " ✓ ${label} refusé"
    fi
}

echo ""
echo "=== Vérification des permissions ==="
echo ""
echo "Actions autorisées :"
expect_allowed create deployments
expect_allowed create pods
expect_allowed create services

echo ""
echo "Actions interdites :"
expect_denied "get secrets" get secrets -n "$NAMESPACE"
expect_denied "create pods dans kube-system" create pods -n kube-system
expect_denied "create clusterroles" create clusterroles
expect_denied "create nodes" create nodes
# --- Final hand-off summary for the operator ---
# One heredoc instead of a run of echo calls; variables expand as before.
cat <<EOF

=============================================
 KUBECONFIG PRÊT À PARTAGER
=============================================

 Fichier : $OUTPUT_FILE
 Namespace : $NAMESPACE
 Expiration : ${TOKEN_DURATION} à partir de maintenant

 IMPORTANT : Ce token expire dans ${TOKEN_DURATION}.
 Pour régénérer : kubectl create token ${SA_NAME} -n ${NAMESPACE} --duration=${TOKEN_DURATION}

 Vecteurs d'attaque que l'équipe PEUT tenter :
 - Escape via les NetworkPolicies (tenter de contacter d'autres namespaces)
 - Escalade RBAC (tenter de créer des ClusterRoles/ClusterRoleBindings)
 - Container breakout (pod privileged → refusé par Kyverno)
 - Shell dans container (bloqué par KubeArmor)
 - Lecture secrets (interdit par RBAC)
 - Déploiement d'image malveillante avec :latest (refusé par Kyverno)
=============================================
EOF
Reference in New Issue
Block a user