Updated IP addresses in the documentation and configuration files, switched the storage parameters to piraeus-storage in several manifests, and added dependencies for installing monitoring and other components.

2025-06-24 12:06:24 +07:00
parent ef52bea50f
commit d52d564a7b
14 changed files with 53 additions and 47 deletions
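
Taken together, the per-file changes below follow one pattern: several HelmReleases gain a dependsOn on the monitoring and piraeus releases, and their persistence is pointed at the piraeus-storage StorageClass. A minimal sketch of that pattern (the release name, chart, and repository names are placeholders for illustration, not taken from the repo):

```yaml
# Hypothetical HelmRelease illustrating the pattern applied in this commit:
# wait for the monitoring and piraeus releases, then consume piraeus-storage.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-app          # hypothetical name
  namespace: flux-system
spec:
  interval: 1h
  dependsOn:
    - name: monitoring
    - name: piraeus
  chart:
    spec:
      chart: example-chart   # hypothetical chart
      sourceRef:
        kind: HelmRepository
        name: example-repo   # hypothetical repository
  values:
    persistence:
      enabled: true
      storageClass: piraeus-storage
      size: 1Gi
```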

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: keycloak
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@@ -86,7 +85,7 @@ spec:
postgresql:
enabled: true
storageClass: "nfs-client"
storageClass: "piraeus-storage"
# externalDatabase:
# host: "keycloak-test-db-rw"

View File

@@ -14,8 +14,9 @@ metadata:
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
dependsOn:
- name: monitoring
- name: piraeus
targetNamespace: kyverno
chart:
spec:

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: loki
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
@@ -16,6 +15,7 @@ spec:
interval: 5m
dependsOn:
- name: monitoring
- name: piraeus
chart:
spec:
version: "6.x"
@@ -84,7 +84,7 @@ spec:
enabled: true
persistence:
enabled: true
storageClass: nfs-client
storageClass: piraeus-storage
size: 20Gi
lokiCanary:
enabled: false
@@ -93,7 +93,7 @@ spec:
replicas: 1
persistence:
enabled: true
storageClass: nfs-client
storageClass: piraeus-storage
size: 10Gi
backend:
replicas: 0

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: monitoring
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@@ -14,6 +13,8 @@ metadata:
namespace: flux-system
spec:
interval: 1h
dependsOn:
- name: piraeus
chart:
spec:
chart: kube-prometheus-stack
@@ -32,7 +33,7 @@ spec:
driftDetection:
mode: enabled
ignore:
- paths: [ "/metadata/annotations/prometheus-operator-validated" ]
- paths: ["/metadata/annotations/prometheus-operator-validated"]
target:
kind: PrometheusRule
values:
@@ -48,7 +49,7 @@ spec:
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: nfs-client
storageClassName: piraeus-storage
resources:
requests:
storage: 20Gi

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: nfs-provisioner
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@@ -34,5 +33,5 @@ spec:
storageClass:
create: true
defaultClass: true
name: nfs-client
name: piraeus-storage
archiveOnDelete: false

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: pgadmin
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@@ -14,6 +13,9 @@ metadata:
namespace: flux-system
spec:
interval: 1h
dependsOn:
- name: monitoring
- name: piraeus
targetNamespace: pgadmin
chart:
spec:
@@ -44,4 +46,4 @@ spec:
accessModes:
- ReadWriteOnce
size: 1Gi
storageClass: "nfs-client"
storageClass: "piraeus-storage"

View File

@@ -6,7 +6,6 @@ metadata:
app.kubernetes.io/component: redis
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@@ -14,6 +13,9 @@ metadata:
namespace: flux-system
spec:
interval: 1h
dependsOn:
- name: monitoring
- name: piraeus
targetNamespace: redis
chart:
spec:
@@ -34,13 +36,13 @@ spec:
count: 1
persistence:
enabled: true
storageClass: "nfs-client"
storageClass: "piraeus-storage"
size: 4Gi
replica:
replicaCount: 1
persistence:
enabled: true
storageClass: "nfs-client"
storageClass: "piraeus-storage"
size: 4Gi
metrics:
enabled: true

View File

@@ -20,13 +20,13 @@ resources:
# - install/nfs-provisioner.yaml
- install/ingress-nginx.yaml
- install/metrics-server.yaml
# - install/monitoring.yaml
- install/monitoring.yaml
# - install/loki.yaml
# - install/promtail.yaml
# - install/kyverno.yaml
# - install/cpng.yaml
- install/cpng.yaml
# - install/vswh.yaml
# - install/stakater.yaml
- install/stakater.yaml
# - install/keycloak.yaml
# - install/redis.yaml
# - install/pgadmin.yaml

View File

@@ -43,27 +43,27 @@ spec:
type: DirectoryOrCreate
---
# apiVersion: piraeus.io/v1
# kind: LinstorSatelliteConfiguration
# metadata:
# name: storage-pool
# spec:
# storagePools:
# - name: pool1
# lvmThinPool:
# volumeGroup: vg1
# thinPool: thin
# source:
# hostDevices:
# - /dev/sdb
# ---
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
name: storage-pool
spec:
storagePools:
- name: pool1
lvmThinPool:
volumeGroup: vg1
thinPool: thin
source:
hostDevices:
- /dev/sdb
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: piraeus-storage
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
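
For reference, a minimal sketch of a PersistentVolumeClaim consuming this class (the claim name and size are assumptions, not part of the commit); with WaitForFirstConsumer the volume is provisioned only once a pod using the claim is scheduled:

```yaml
# Hypothetical PVC bound to the piraeus-storage class defined above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-data            # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: piraeus-storage
  resources:
    requests:
      storage: 1Gi
```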

View File

@@ -21,7 +21,7 @@ ISO images for the amd64 and arm64 architectures are available
```bash
talosctl gen secrets -o config/secrets.yaml
talosctl gen config --kubernetes-version 1.33.2 --with-secrets config/secrets.yaml talos-demo https://192.168.23.51:6443 --config-patch @patch.yaml
talosctl gen config --kubernetes-version 1.33.2 --with-secrets config/secrets.yaml talos-demo https://192.168.23.54:6443 --config-patch @patch.yaml
talosctl machineconfig patch config/controlplane.yaml --patch @cp1.patch --output config/cp1.yaml
talosctl machineconfig patch config/controlplane.yaml --patch @cp2.patch --output config/cp2.yaml
@@ -34,20 +34,22 @@ talosctl machineconfig patch config/worker.yaml --patch @worker2.patch --output
we perform this part ourselves
```bash
talosctl apply-config --insecure -n 192.168.23.51 --file config/cp1.yaml
talosctl bootstrap --nodes 192.168.23.51 --endpoints 192.168.23.51 --talosconfig=config/talosconfig
talosctl apply-config --insecure -n 192.168.23.54 --file config/cp1.yaml
talosctl bootstrap --nodes 192.168.23.54 --endpoints 192.168.23.54 --talosconfig=config/talosconfig
talosctl apply-config --insecure -n 192.168.23.52 --file config/cp2.yaml
talosctl apply-config --insecure -n 192.168.23.53 --file config/cp3.yaml
talosctl apply-config --insecure -n 192.168.23.54 --file config/worker1.yaml
talosctl apply-config --insecure -n 192.168.23.55 --file config/worker2.yaml
talosctl apply-config --insecure -n 192.168.23.55 --file config/worker1.yaml
talosctl apply-config --insecure -n 192.168.23.56 --file config/worker2.yaml
talosctl kubeconfig ~/.kube/talos-demo.yaml --nodes 192.168.23.51 --endpoints 192.168.23.51 --talosconfig config/talosconfig
talosctl kubeconfig ~/.kube/talos-demo.yaml --nodes 192.168.23.54 --endpoints 192.168.23.54 --talosconfig config/talosconfig
```
## Cilium
edit the t8s API address in cilium/values.yaml
```bash
helm repo add cilium https://helm.cilium.io/
helm repo update
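
The diff is truncated after the repository setup; a hedged sketch of how such a values file is typically applied when installing the chart (the release name, namespace, and flags are assumptions, not taken from the diff):

```bash
# Hypothetical install step using the patched values file from this repo.
helm install cilium cilium/cilium \
  --namespace kube-system \
  -f cilium/values.yaml
```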

View File

@@ -4,4 +4,4 @@ metadata:
name: pool
spec:
blocks:
- cidr: 192.168.23.21/32
- cidr: 192.168.23.57/32

View File

@@ -11,6 +11,6 @@ spec:
- key: node-role.kubernetes.io/control-plane
operator: DoesNotExist
interfaces:
- ^eth+
- ^ens+
externalIPs: true
loadBalancerIPs: true

View File

@@ -23,7 +23,7 @@ cgroup:
autoMount:
enabled: false
hostRoot: /sys/fs/cgroup
k8sServiceHost: 192.168.23.20
k8sServiceHost: 192.168.23.54
k8sServicePort: 6443
l2announcements:
enabled: true

View File

@@ -9,8 +9,8 @@ vmid_template=777
vmid=80
vm_name=t8s-demo
controllers=3
workers=2
controllers=${2:-3}
workers=${3:-2}
echo "${VERB} stand"
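
The change turns the controller and worker counts into optional positional parameters with defaults, while the verb stays in $1. A hedged usage sketch (the script name is assumed, not shown in the diff):

```bash
# Hypothetical invocations; $1 = verb, $2 = controllers (default 3), $3 = workers (default 2).
./provision.sh create        # 3 control-plane nodes, 2 workers (defaults)
./provision.sh create 3 5    # 3 control-plane nodes, 5 workers
```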