first upload
0.deploy-ALL.sh (Executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash

./1.kind-cluster-create.sh
./2.0.base.sh
./2.5.istio-deploy.sh
./3.helm-deploy-zabbix-monitoring.sh # <- comment this line out if you are not using zabbix

echo -ne "\nWaiting for all pods to start... "
n=1
while [ $(kubectl get po -A | grep -v STATUS | grep -vc Running) -ne 1 ]
do
  sleep 1
  echo -n "#"
  n=$((n+1))
done
echo " ...done ($n sec)!"
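If you prefer a declarative wait over the polling loop above, roughly the same check can be written with kubectl wait. A sketch, not part of the commit: the 10-minute timeout is an arbitrary choice, Succeeded/Completed pods are excluded because they never become Ready, and like the loop it only covers pods that already exist.

# wait until every existing, non-Completed pod in every namespace is Ready
kubectl wait pod --all --all-namespaces \
  --field-selector=status.phase!=Succeeded \
  --for=condition=Ready --timeout=600s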
1.kind-cluster-create.sh (Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash

kind create cluster --config kind-demo-cluster-config.yaml --name kind-demo --image kindest/node:v1.32.5
docker update -m 4g --memory-swap -1 kind-demo-control-plane
docker update -m 4g --memory-swap -1 kind-demo-worker
docker update -m 4g --memory-swap -1 kind-demo-worker2
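A quick way to confirm the cluster came up and the memory limits were applied (a sketch, not part of the committed scripts; the nodes will report NotReady until the Calico step in 2.0.base.sh has run, because the kind config disables the default CNI):

kind get clusters                                                    # should list: kind-demo
kubectl get nodes -o wide                                            # one control-plane and two worker nodes
docker inspect -f '{{.HostConfig.Memory}}' kind-demo-control-plane   # expect 4294967296 (4g)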
2.0.base.sh (Executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/bin/bash

# install calico
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/calico.yaml

# install metrics-server
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

# patch metrics-server
kubectl patch deployment metrics-server -n kube-system --patch '{
  "spec": {
    "template": {
      "spec": {
        "containers": [{
          "name": "metrics-server",
          "args": [
            "--cert-dir=/tmp",
            "--secure-port=10250",
            "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
            "--kubelet-use-node-status-port",
            "--metric-resolution=15s",
            "--kubelet-insecure-tls"
          ]
        }]
      }
    }
  }
}'

# namespace based policy
# Levels:
# - privileged: everything is allowed, e.g. running containers as root, privilege escalation, etc.
# - baseline: disallows privilege escalation, root privileges, etc.
# - restricted: the strictest level
kubectl create namespace secure
kubectl label namespace secure \
  pod-security.kubernetes.io/enforce=restricted \
  pod-security.kubernetes.io/enforce-version=latest

# add the dev-admin and readonly users
./setup-kind-users.sh
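Two quick sanity checks for this step (a sketch; the pod names, test commands and images are illustrative and not part of the commit): the patched metrics-server should start answering kubectl top after a minute or so, and the restricted PodSecurity label on the secure namespace should reject a pod that lacks the required securityContext while admitting a compliant one.

kubectl top nodes                            # works once the patched metrics-server is Ready

# expected to be rejected: plain nginx violates the restricted PodSecurity level
kubectl run psa-test --image=nginx -n secure

# a compliant pod: non-root, no privilege escalation, all capabilities dropped, default seccomp
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: psa-ok
  namespace: secure
spec:
  containers:
  - name: app
    image: nginxinc/nginx-unprivileged
    securityContext:
      runAsNonRoot: true
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
      seccompProfile:
        type: RuntimeDefault
EOF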
2.5.istio-deploy.sh (Executable file, 51 lines)
@@ -0,0 +1,51 @@
#!/bin/bash

istioctl install --set profile=default -y
kubectl get crd gateways.gateway.networking.k8s.io &> /dev/null || kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.1/standard-install.yaml

echo -e "\n########## patching the istio network: ##########"
kubectl patch svc -n istio-system istio-ingressgateway --patch-file istio-ingressgateway-patch-nodeport.yaml

echo -e "\n########## patching istio resources: ##########"
kubectl patch deployment istio-ingressgateway -n istio-system --patch '{
  "spec": {
    "template": {
      "spec": {
        "containers": [{
          "name": "istio-proxy",
          "resources": {
            "requests": {
              "cpu": "10m",
              "memory": "64Mi"
            },
            "limits": {
              "cpu": "500m",
              "memory": "512Mi"
            }
          }
        }]
      }
    }
  }
}'
kubectl patch deployment istiod -n istio-system --patch '{
  "spec": {
    "template": {
      "spec": {
        "containers": [{
          "name": "discovery",
          "resources": {
            "requests": {
              "cpu": "10m",
              "memory": "64Mi"
            },
            "limits": {
              "cpu": "500m",
              "memory": "512Mi"
            }
          }
        }]
      }
    }
  }
}'
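To actually route traffic through the patched ingress gateway, an application still needs Istio Gateway and VirtualService objects. A minimal sketch follows; the demo.localtest.me host name, the default namespace and the demo-app backend service are hypothetical and not part of this commit.

cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: demo-gateway
  namespace: default
spec:
  selector:
    istio: ingressgateway        # matches the patched istio-ingressgateway service
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "demo.localtest.me"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: demo-app
  namespace: default
spec:
  hosts:
  - "demo.localtest.me"
  gateways:
  - demo-gateway
  http:
  - route:
    - destination:
        host: demo-app           # hypothetical ClusterIP service in the same namespace
        port:
          number: 80
EOF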
3.helm-deploy-zabbix-monitoring.sh (Executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash

HELMPATH="helm-zabbix-monitoring"

if [[ -d $HELMPATH ]]
then
  cd $HELMPATH
  git pull
else
  git clone https://git.zabbix.com/scm/zt/kubernetes-helm.git $HELMPATH
  cd $HELMPATH
fi

helm install zabbix . --dependency-update -f ../zabbix_values.yaml -n monitoring --create-namespace
echo -e "\nToken for the zabbix macro ({\$KUBE.API.TOKEN}):\n"$(kubectl get secret zabbix-service-account -n monitoring -o jsonpath={.data.token} | base64 -d)"\n"
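A couple of quick checks after the install (a sketch, not part of the commit; the release name and namespace follow the script above):

helm status zabbix -n monitoring                 # release should show STATUS: deployed
kubectl get pods -n monitoring                   # proxy and per-node agent pods
# re-print the service-account token later if needed:
kubectl get secret zabbix-service-account -n monitoring -o jsonpath='{.data.token}' | base64 -d; echo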
istio-ingressgateway-patch-nodeport.yaml (Normal file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  name: istio-ingressgateway
  namespace: istio-system
spec:
  ports:
  - name: http
    nodePort: 30080
    port: 80
    protocol: TCP
    targetPort: 8080
  - name: https
    nodePort: 30443
    port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    app: istio-ingressgateway
    istio: ingressgateway
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
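Because the kind config below maps host ports 80/443 to these NodePorts (30080/30443), the gateway should be reachable directly from the host once a Gateway/VirtualService exists. A sketch, assuming the hypothetical demo.localtest.me host from the 2.5 example above:

curl -H "Host: demo.localtest.me" http://localhost/
curl -k -H "Host: demo.localtest.me" https://localhost/   # only if a TLS server is configured on the gateway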
kind-cluster-delete.sh (Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

kind delete cluster --name kind-demo
kind-demo-cluster-config.yaml (Normal file, 29 lines)
@@ -0,0 +1,29 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  disableDefaultCNI: true
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  - |
    kind: ClusterConfiguration
    apiServer:
      extraArgs:
        enable-admission-plugins: "PodSecurity"
  extraPortMappings:
  - containerPort: 30080
    hostPort: 80
    protocol: TCP
  - containerPort: 30443
    hostPort: 443
    protocol: TCP
  extraMounts:
  - hostPath: /home/msandor/Kind/volume # <- change this to your own directory!
    containerPath: /volume
- role: worker
- role: worker
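The host directory above ends up inside the control-plane node container at /volume. One way to use it from a workload is a plain hostPath volume pinned to that node (a sketch with hypothetical names, not part of the commit; the mount only exists on the control-plane node, and nodeName bypasses the scheduler, so the control-plane NoSchedule taint does not block it):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: volume-test
spec:
  nodeName: kind-demo-control-plane   # the extraMounts entry is only declared on this node
  containers:
  - name: shell
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: host-volume
      mountPath: /data
  volumes:
  - name: host-volume
    hostPath:
      path: /volume
      type: Directory
EOF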
setup-kind-users.sh (Executable file, 140 lines)
@@ -0,0 +1,140 @@
#!/bin/bash

set -euo pipefail

CLUSTER_NAME="kind-kind-demo"
NAMESPACE="dev"
OUTPUT_DIR="./rbac-users"

mkdir -p "$OUTPUT_DIR"

echo "[+] Extracting the CA certificate and key from the control-plane node..."

CA_CERT_PATH="${OUTPUT_DIR}/ca.crt"
CA_KEY_PATH="${OUTPUT_DIR}/ca.key"

docker cp kind-demo-control-plane:/etc/kubernetes/pki/ca.crt $OUTPUT_DIR/ca.crt
docker cp kind-demo-control-plane:/etc/kubernetes/pki/ca.key $OUTPUT_DIR/ca.key

# List of users
USERS=("readonly" "dev-admin")

for USER in "${USERS[@]}"; do
  echo -e "\n[+] Generating certificate for: $USER"

  openssl genrsa -out "${OUTPUT_DIR}/${USER}.key" 2048
  openssl req -new -key "${OUTPUT_DIR}/${USER}.key" \
    -out "${OUTPUT_DIR}/${USER}.csr" \
    -subj "/CN=${USER}/O=devs"

  openssl x509 -req \
    -in "${OUTPUT_DIR}/${USER}.csr" \
    -CA "$CA_CERT_PATH" \
    -CAkey "$CA_KEY_PATH" \
    -CAcreateserial \
    -out "${OUTPUT_DIR}/${USER}.crt" \
    -days 365 \
    -sha256
done

echo -e "\n[+] Creating RBAC"

# Readonly user: cluster-wide view
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: readonly-user
subjects:
- kind: User
  name: readonly
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: view
  apiGroup: rbac.authorization.k8s.io
EOF

# Create the namespace if it does not exist yet
kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -

# dev-admin user: admin rights in the dev namespace
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: dev-admin-binding
  namespace: $NAMESPACE
subjects:
- kind: User
  name: dev-admin
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: admin
  apiGroup: rbac.authorization.k8s.io
EOF

cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dev-admin-readonly-global
subjects:
- kind: User
  name: dev-admin
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: view
  apiGroup: rbac.authorization.k8s.io
EOF


echo -e "\n[+] Generating kubeconfig files..."

SERVER=$(kubectl config view --raw -o jsonpath="{.clusters[?(@.name==\"$CLUSTER_NAME\")].cluster.server}")

for USER in "${USERS[@]}"; do
  KUBECONFIG_USER="${OUTPUT_DIR}/kubeconfig-${USER}"

  kubectl config --kubeconfig="${KUBECONFIG_USER}" set-cluster "$CLUSTER_NAME" \
    --server="$SERVER" \
    --certificate-authority="${CA_CERT_PATH}" \
    --embed-certs=true

  kubectl config --kubeconfig="${KUBECONFIG_USER}" set-credentials "$USER" \
    --client-certificate="${OUTPUT_DIR}/${USER}.crt" \
    --client-key="${OUTPUT_DIR}/${USER}.key" \
    --embed-certs=true

  kubectl config --kubeconfig="${KUBECONFIG_USER}" set-context "$USER-context" \
    --cluster="$CLUSTER_NAME" \
    --user="$USER"

  kubectl config --kubeconfig="${KUBECONFIG_USER}" use-context "$USER-context"

  echo -e "\n [✔] Created: ${KUBECONFIG_USER}"
done

echo -e "\nMerging the users into the current kubeconfig:"
echo
kubectl config set-credentials readonly \
  --client-certificate=$OUTPUT_DIR/readonly.crt \
  --client-key=$OUTPUT_DIR/readonly.key \
  --embed-certs=true

kubectl config set-context readonly-context \
  --cluster=kind-kind-demo \
  --user=readonly

kubectl config set-credentials dev-admin \
  --client-certificate=$OUTPUT_DIR/dev-admin.crt \
  --client-key=$OUTPUT_DIR/dev-admin.key \
  --embed-certs=true

kubectl config set-context dev-admin-context \
  --cluster=kind-kind-demo \
  --user=dev-admin

echo "[✅ DONE]"
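A quick way to verify the generated permissions, using the contexts merged above (a sketch; the expected answers are in the comments):

kubectl --context readonly-context  auth can-i get pods -A                    # yes (view, cluster-wide)
kubectl --context readonly-context  auth can-i create deployments -n dev      # no
kubectl --context dev-admin-context auth can-i create deployments -n dev      # yes (admin in dev)
kubectl --context dev-admin-context auth can-i create deployments -n default  # no (view only elsewhere)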
zabbix_values.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
zabbixProxy:
  image:
    tag: alpine-7.0-latest # <- change this if you use a different version
    pullPolicy: Always
  env:
    - name: ZBX_HOSTNAME
      value: "kind-demo"
    - name: ZBX_SERVER_HOST
      value: "zabbix.msandor.hu" # <- put your own zabbix server's host name or IP address here
    - name: ZBX_PROXYCONFIGFREQUENCY
      value: "60"

zabbixAgent:
  image:
    tag: alpine-7.0-latest # <- change this if you use a different version
    pullPolicy: Always
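If these values are edited after the first install, the same chart checkout can be re-applied with helm upgrade (a sketch, run from inside the helm-zabbix-monitoring directory created by 3.helm-deploy-zabbix-monitoring.sh):

helm upgrade zabbix . --dependency-update -f ../zabbix_values.yaml -n monitoring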