commit e8f9d2bbe7
Date: 2025-06-23 23:05:28 +07:00
42 changed files with 14772 additions and 0 deletions

talos-k8s-flux/README.md (new file, 5 lines)

@@ -0,0 +1,5 @@
# Boot
```bash
flux bootstrap git --url=https://git.realmanual.ru/pub/courses/talos-kurs.git --token-auth --path=talos-k8s-flux/clusters/t8s-demo
```
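After bootstrapping, a quick way to confirm that Flux is healthy and that this path is reconciling (a sketch using the stock Flux CLI; it assumes the kubeconfig already points at the new cluster):

```bash
# verify prerequisites, controllers and CRDs
flux check
# the flux-system GitRepository and Kustomization should both report Ready
flux get sources git -A
flux get kustomizations -A
```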

File diff suppressed because it is too large.


@@ -0,0 +1,27 @@
# This manifest was generated by flux. DO NOT EDIT.
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: flux-system
namespace: flux-system
spec:
interval: 1m0s
ref:
branch: main
secretRef:
name: flux-system
url: https://git.realmanual.ru/pub/courses/talos-kurs.git
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: flux-system
namespace: flux-system
spec:
interval: 5m0s
path: clusters/t8s-demo
prune: true
sourceRef:
kind: GitRepository
name: flux-system
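The GitRepository above is polled every minute and the Kustomization applies `clusters/t8s-demo` every five minutes with pruning enabled. A small sketch for forcing a reconcile instead of waiting for the interval:

```bash
# fetch the latest commit and re-apply the cluster path immediately
flux reconcile kustomization flux-system --with-source
kubectl -n flux-system get gitrepositories,kustomizations
```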


@@ -0,0 +1,58 @@
apiVersion: v1
kind: Namespace
metadata:
name: coroot-operator
labels:
app.kubernetes.io/component: coroot
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: coroot-operator
namespace: flux-system
spec:
interval: 1h
targetNamespace: coroot-operator
chart:
spec:
chart: coroot-operator
sourceRef:
kind: HelmRepository
name: coroot-repo
namespace: flux-system
interval: 60m
---
apiVersion: v1
kind: Namespace
metadata:
name: coroot
labels:
app.kubernetes.io/component: coroot
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: coroot
namespace: flux-system
spec:
dependsOn:
- name: coroot-operator
interval: 1h
targetNamespace: coroot
chart:
spec:
chart: coroot-ce
sourceRef:
kind: HelmRepository
name: coroot-repo
namespace: flux-system
interval: 60m
values:
clickhouse:
shards: 1
replicas: 1


@@ -0,0 +1,28 @@
apiVersion: v1
kind: Namespace
metadata:
name: cnpg-system
labels:
app.kubernetes.io/component: cnpg
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cnpg
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
targetNamespace: cnpg-system
chart:
spec:
chart: cloudnative-pg
version: 0.24.0
sourceRef:
kind: HelmRepository
name: cnpg-repo
namespace: flux-system
interval: 60m


@@ -0,0 +1,41 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/component: ingress-nginx
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: ingress-nginx
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
targetNamespace: ingress-nginx
chart:
spec:
chart: ingress-nginx
version: 4.12.3
sourceRef:
kind: HelmRepository
name: ingress-nginx-repo
namespace: flux-system
interval: 60m
values:
controller:
ingressClassResource:
name: nginx
enabled: true
default: true
kind: DaemonSet
service:
type: NodePort
config:
allow-snippet-annotations: true
annotations-risk-level: Critical
enable-global-auth: true
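With `kind: DaemonSet` and a NodePort Service, the controller runs on the nodes and is reachable on node ports. A minimal check (the exact Service name depends on the Helm release name Flux generates, so a listing is used rather than guessing it):

```bash
# list the controller Services and the node ports Kubernetes assigned
kubectl -n ingress-nginx get svc -o wide
# confirm that the "nginx" class exists and is marked as the default
kubectl get ingressclass
```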


@@ -0,0 +1,105 @@
apiVersion: v1
kind: Namespace
metadata:
name: keycloak
labels:
app.kubernetes.io/component: keycloak
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: keycloak
namespace: flux-system
spec:
interval: 1h
targetNamespace: keycloak
chart:
spec:
chart: keycloak
version: 24.7.4
sourceRef:
kind: HelmRepository
name: bitnami-repo
namespace: flux-system
interval: 60m
values:
ingress:
enabled: false
# hostname: key-dev.bildme.ru
# servicePort: http
# tls: true
# extraTls:
# - hosts:
# - key-dev.bildme.ru
# secretName: tls-ingress
# tls:
# enabled: true
# existingSecret: "tls-ingress"
service:
type: NodePort
http:
enabled: true
ports:
http: 80
https: 443
nodePorts:
# http: "8080"
# https: "8494"
nodePortHttp: "32183"
nodePortHttps: "32184"
# extraVolumes: |
# - name: theme
# emptyDir: {}
# extraVolumeMounts:
# - name: theme
# mountPath: /opt/bitnami/keycloak/themes
# initContainers:
# - name: theme-provider
# image: hub.bildme.ru/img/keycloak-theme:0.0.2
# imagePullPolicy: IfNotPresent
# command:
# - sh
# args:
# - -c
# - |
# echo "Copying theme..."
# cp -R -keycloak-theme/* /theme
# volumeMounts:
# - name: theme
# mountPath: /theme
metrics:
enabled: false
serviceMonitor:
enabled: true
labels:
app: kube-prometheus-stack
release: in-cluster-monitoring
prometheusRule:
enabled: false
postgresql:
enabled: true
storageClass: "nfs-client"
# externalDatabase:
# host: "keycloak-test-db-rw"
# port: 5432
# user: keycloakdbadmin
# database: keycloakinfradbtest
# password: ""
# existingSecret: "keycloak-test-db-app"
# existingSecretHostKey: ""
# existingSecretPortKey: ""
# existingSecretUserKey: ""
# existingSecretDatabaseKey: ""
# existingSecretPasswordKey: ""
# annotations: {}
# httpRelativePath: "/auth/"


@@ -0,0 +1,59 @@
apiVersion: v1
kind: Namespace
metadata:
name: kyverno
labels:
app.kubernetes.io/component: kyverno
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kyverno
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
targetNamespace: kyverno
chart:
spec:
chart: kyverno
version: 3.4.2
sourceRef:
kind: HelmRepository
name: kyverno-repo
namespace: flux-system
values:
installCRDs: true
admissionController:
rbac:
clusterRole:
extraResources:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["*"]
backgroundController:
rbac:
clusterRole:
extraResources:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["*"]
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: kyverno-policies
namespace: flux-system
spec:
interval: 5m
path: ../../soft/kyverno/
prune: true
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
---


@@ -0,0 +1,121 @@
apiVersion: v1
kind: Namespace
metadata:
name: loki
labels:
app.kubernetes.io/component: loki
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: loki
namespace: flux-system
spec:
interval: 5m
dependsOn:
- name: monitoring
chart:
spec:
version: "6.x"
chart: loki
sourceRef:
kind: HelmRepository
name: grafana-charts
interval: 60m
targetNamespace: loki
values:
chunksCache:
enabled: false
resultsCache:
enabled: false
test:
enabled: false
# following https://github.com/fluxcd/flux2-monitoring-example/pull/23/files#diff-5e041afacf25eb055565b4a1c32d5b81201ddce29c84adf13a6ae88463e0832b
extraObjects:
- apiVersion: v1
kind: ConfigMap
metadata:
name: loki-datasource
labels:
app: loki
chart: loki
release: loki
grafana_datasource: "1"
app.kubernetes.io/part-of: kube-prometheus-stack
data:
loki-datasource.yaml: |-
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: http://loki:{{ .Values.loki.server.http_listen_port }}
version: 1
isDefault: true
loki:
auth_enabled: false
# serviceMonitor:
# enabled: true
# labels:
# app.kubernetes.io/part-of: kube-prometheus-stack
limits_config:
allow_structured_metadata: true
retention_period: 24h
volume_enabled: true
# https://grafana.com/docs/loki/latest/setup/install/helm/install-monolithic/
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: "2024-04-01"
store: tsdb
object_store: s3
schema: v13
index:
prefix: loki_index_
period: 24h
pattern_ingester:
enabled: true
ruler:
enable_api: true
minio:
enabled: true
persistence:
enabled: true
storageClass: nfs-client
size: 20Gi
lokiCanary:
enabled: false
deploymentMode: SingleBinary
singleBinary:
replicas: 1
persistence:
enabled: true
storageClass: nfs-client
size: 10Gi
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0


@@ -0,0 +1,22 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metrics-server
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
targetNamespace: kube-system
chart:
spec:
chart: metrics-server
version: 3.12.2
sourceRef:
kind: HelmRepository
name: metrics-server-repo
namespace: flux-system
interval: 60m
values:
args:
- --kubelet-insecure-tls
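The `--kubelet-insecure-tls` flag tells metrics-server to accept the kubelets' self-signed serving certificates, a common shortcut on lab clusters. Once the release is up, the metrics API can be exercised directly:

```bash
# both commands go through the metrics.k8s.io API served by metrics-server
kubectl top nodes
kubectl top pods -A | head
```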


@@ -0,0 +1,66 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring
labels:
app.kubernetes.io/component: monitoring
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: monitoring
namespace: flux-system
spec:
interval: 1h
chart:
spec:
chart: kube-prometheus-stack
version: 72.9.1
sourceRef:
kind: HelmRepository
name: monitoring-repo
namespace: flux-system
install:
crds: Create
timeout: 10m0s
upgrade:
crds: Create
timeout: 10m0s
targetNamespace: monitoring
driftDetection:
mode: enabled
ignore:
- paths: [ "/metadata/annotations/prometheus-operator-validated" ]
target:
kind: PrometheusRule
values:
alertmanager:
enabled: false
prometheus:
ingress:
enabled: false
prometheusSpec:
replicas: 1
retention: 24h
retentionSize: "18GB"
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: nfs-client
resources:
requests:
storage: 20Gi
grafana:
enabled: false
kubeControllerManager:
enabled: false
kubeEtcd:
enabled: false
kubeScheduler:
enabled: false
kubeProxy:
enabled: false
kubeApiServer:
enabled: false
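A sketch for checking that the Prometheus defined above is running and for reaching its UI; it relies on the `prometheus-operated` headless Service that prometheus-operator creates next to every Prometheus resource (assumption: default operator behaviour, no custom service names):

```bash
kubectl -n monitoring get prometheus,servicemonitors
kubectl -n monitoring get pods -l app.kubernetes.io/name=prometheus
# temporary local access to the Prometheus UI on http://localhost:9090
kubectl -n monitoring port-forward svc/prometheus-operated 9090:9090
```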


@@ -0,0 +1,38 @@
apiVersion: v1
kind: Namespace
metadata:
name: nfs-provisioner
labels:
app.kubernetes.io/component: nfs-provisioner
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: nfs-provisioner
namespace: flux-system
spec:
interval: 1h
targetNamespace: nfs-provisioner
chart:
spec:
chart: nfs-subdir-external-provisioner
version: 4.0.18
sourceRef:
kind: HelmRepository
name: nfs-provisioner-repo
namespace: flux-system
interval: 60m
values:
nfs:
server: 192.168.23.5
path: /mnt/data
mountOptions:
volumeName: nfs-subdir-external-provisioner-root
reclaimPolicy: Retain
storageClass:
create: true
defaultClass: true
name: nfs-client
archiveOnDelete: false
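Since `nfs-client` is created as the default StorageClass, a throwaway PVC makes a simple smoke test (the claim name below is arbitrary):

```bash
kubectl get storageclass nfs-client
# create a 1Gi test claim and check that the provisioner binds it
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-smoke-test
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: nfs-client
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc nfs-smoke-test
kubectl delete pvc nfs-smoke-test
```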


@@ -0,0 +1,47 @@
apiVersion: v1
kind: Namespace
metadata:
name: pgadmin
labels:
app.kubernetes.io/component: pgadmin
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: pgadmin4
namespace: flux-system
spec:
interval: 1h
targetNamespace: pgadmin
chart:
spec:
chart: pgadmin4
version: 1.47.0
sourceRef:
kind: HelmRepository
name: pgadmin-repo
namespace: flux-system
interval: 60m
values:
ingress:
enabled: false
# annotations: {}
# ingressClassName: "nginx"
# hosts:
# - host: pgadmin-oat.bildme.ru
# paths:
# - path: /
# pathType: Prefix
# tls:
# - secretName: tls-self
# hosts:
# - pgadmin-oat.bildme.ru
persistentVolume:
enabled: true
accessModes:
- ReadWriteOnce
size: 1Gi
storageClass: "nfs-client"


@@ -0,0 +1,13 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: piraeus-cluster
namespace: flux-system
spec:
interval: 5m
path: ../../soft/piraeus/
prune: true
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system


@@ -0,0 +1,28 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: promtail
namespace: flux-system
spec:
interval: 5m
timeout: 1m
dependsOn:
- name: monitoring
- name: loki
chart:
spec:
version: "6.x"
chart: promtail
sourceRef:
kind: HelmRepository
name: grafana-charts
interval: 60m
targetNamespace: loki
values:
# https://grafana.com/docs/loki/latest/send-data/promtail/installation/
config:
# publish data to loki
clients:
- url: http://loki-loki-gateway/loki/api/v1/push
tenant_id: 1
---


@@ -0,0 +1,58 @@
apiVersion: v1
kind: Namespace
metadata:
name: redis
labels:
app.kubernetes.io/component: redis
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: redis
namespace: flux-system
spec:
interval: 1h
targetNamespace: redis
chart:
spec:
chart: redis
version: 21.2.3
sourceRef:
kind: HelmRepository
name: bitnami-repo
namespace: flux-system
interval: 60m
values:
global:
redis:
password: ""
auth:
enabled: false
master:
count: 1
persistence:
enabled: true
storageClass: "nfs-client"
size: 4Gi
replica:
replicaCount: 1
persistence:
enabled: true
storageClass: "nfs-client"
size: 4Gi
metrics:
enabled: true
serviceMonitor:
enabled: true
additionalLabels:
prometheus: redis-cluster
app: kube-prometheus-stack
# release: in-cluster-monitoring
prometheusRule:
enabled: true
additionalLabels:
prometheus: redis-cluster
app: kube-prometheus-stack
# release: in-cluster-monitoring


@@ -0,0 +1,25 @@
apiVersion: v1
kind: Namespace
metadata:
name: stakater
labels:
app.kubernetes.io/component: stakater
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: stakater
namespace: flux-system
spec:
interval: 1h
targetNamespace: stakater
chart:
spec:
chart: reloader
sourceRef:
kind: HelmRepository
name: stakater-repo
namespace: flux-system
interval: 60m


@@ -0,0 +1,43 @@
apiVersion: v1
kind: Namespace
metadata:
name: vswh
labels:
app.kubernetes.io/component: vswh
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: vault-secrets-webhook
namespace: flux-system
spec:
interval: 10m
releaseName: vswh
chartRef:
kind: OCIRepository
name: vault-secrets-webhook
namespace: flux-system
targetNamespace: vswh
values:
# vaultEnv:
# repository: hub.ntk.novotelecom.ru/img/vault-env
certificate:
certLifespan: 3650
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: vault-operator-rbac
namespace: flux-system
spec:
interval: 5m
prune: true
sourceRef:
kind: GitRepository
name: vault-operator
namespace: flux-system
targetNamespace: vswh
path: ./deploy/rbac


@@ -0,0 +1,33 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- gotk-components.yaml
- gotk-sync.yaml
- ../../soft/piraeus.yaml
- ../../soft/cpng.yaml
- ../../soft/ingress-nginx.yaml
- ../../soft/metrics-server.yaml
- ../../soft/nfs-provisioner.yaml
- ../../soft/monitoring.yaml
- ../../soft/kyverno.yaml
- ../../soft/loki.yaml
- ../../soft/vault-secrets-webhook.yaml
- ../../soft/stakater.yaml
- ../../soft/bitnami.yaml
- ../../soft/pgadmin.yaml
- ../../soft/coroot.yaml
- install/piraeus.yaml
# - install/nfs-provisioner.yaml
- install/ingress-nginx.yaml
- install/metrics-server.yaml
# - install/monitoring.yaml
# - install/loki.yaml
# - install/promtail.yaml
# - install/kyverno.yaml
# - install/cpng.yaml
# - install/vswh.yaml
# - install/stakater.yaml
# - install/keycloak.yaml
# - install/redis.yaml
# - install/pgadmin.yaml
# - install/coroot.yaml
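Every entry under `install/` that is commented out keeps its HelmRepository/source defined but is not deployed. One way to see what actually reconciled from this list, plus an optional local render (the local build needs the relaxed load restrictor because the resources point outside the kustomization directory, which Flux itself permits; assumes the `kustomize` CLI is installed):

```bash
# what the cluster is actually running
flux get kustomizations -A
flux get helmreleases -A
# optional: render the cluster directory locally from the repository root
kustomize build --load-restrictor LoadRestrictionsNone talos-k8s-flux/clusters/t8s-demo | head -n 40
```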


@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: bitnami-repo
namespace: flux-system
spec:
interval: 1h
url: https://mirror.yandex.ru/helm/charts.bitnami.com/


@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: coroot-repo
namespace: flux-system
spec:
interval: 1h
url: https://coroot.github.io/helm-charts


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: cnpg-repo
namespace: flux-system
spec:
interval: 1h
url: https://cloudnative-pg.github.io/charts
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: ingress-nginx-repo
namespace: flux-system
spec:
interval: 1h
url: https://kubernetes.github.io/ingress-nginx
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: kyverno-repo
namespace: flux-system
spec:
interval: 1h
url: https://kyverno.github.io/kyverno/
---


@@ -0,0 +1,26 @@
apiVersion: kyverno.io/v1
kind: Policy
metadata:
name: require-labels
namespace: kyverno
annotations:
meta.helm.sh/release-name: kyverno
meta.helm.sh/release-namespace: flux-system
labels:
app.kubernetes.io/managed-by: Helm
spec:
validationFailureAction: audit
background: true
rules:
- name: require-labels
match:
any:
- resources:
kinds:
- Pod
validate:
message: "Label 'app.kubernetes.io/name' is required"
pattern:
metadata:
labels:
app.kubernetes.io/name: "?*"
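Because `validationFailureAction` is `audit` and this is a namespaced `Policy`, unlabeled Pods in the `kyverno` namespace are still admitted but show up in policy reports. A hedged way to exercise the rule:

```bash
# start a pod without the required label in the namespace the Policy watches
kubectl -n kyverno run audit-test --image=nginx --restart=Never
# the violation should appear in the namespace PolicyReport shortly afterwards
kubectl -n kyverno get policyreport -o wide
kubectl -n kyverno delete pod audit-test
```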


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: grafana-charts
namespace: flux-system
spec:
interval: 1h
url: https://grafana.github.io/helm-charts
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: metrics-server-repo
namespace: flux-system
spec:
interval: 1h
url: https://kubernetes-sigs.github.io/metrics-server/
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: monitoring-repo
namespace: flux-system
spec:
interval: 1h
url: https://prometheus-community.github.io/helm-charts
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: nfs-provisioner-repo
namespace: flux-system
spec:
interval: 1h
url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
---


@@ -0,0 +1,8 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: pgadmin-repo
namespace: flux-system
spec:
interval: 1h
url: https://helm.runix.net


@@ -0,0 +1,12 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: piraeus-repo
namespace: flux-system
spec:
interval: 1h
ref:
branch: v2
timeout: 60s
url: https://github.com/piraeusdatastore/piraeus-operator.git
---


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- piraeus-operator.yaml
- piraeus-cluster.yaml


@@ -0,0 +1,83 @@
apiVersion: piraeus.io/v1
kind: LinstorCluster
metadata:
name: linstorcluster
spec: {}
# nodeAffinity:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-role.kubernetes.io/control-plane
# operator: DoesNotExist
---
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
name: talos-loader-override
spec:
podTemplate:
spec:
initContainers:
- name: drbd-shutdown-guard
$patch: delete
- name: drbd-module-loader
$patch: delete
volumes:
- name: run-systemd-system
$patch: delete
- name: run-drbd-shutdown-guard
$patch: delete
- name: systemd-bus-socket
$patch: delete
- name: lib-modules
$patch: delete
- name: usr-src
$patch: delete
- name: etc-lvm-backup
hostPath:
path: /var/etc/lvm/backup
type: DirectoryOrCreate
- name: etc-lvm-archive
hostPath:
path: /var/etc/lvm/archive
type: DirectoryOrCreate
---
# apiVersion: piraeus.io/v1
# kind: LinstorSatelliteConfiguration
# metadata:
# name: storage-pool
# spec:
# storagePools:
# - name: pool1
# lvmThinPool:
# volumeGroup: vg1
# thinPool: thin
# source:
# hostDevices:
# - /dev/sdb
# ---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: piraeus-storage
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
parameters:
linstor.csi.linbit.com/storagePool: pool1
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: piraeus-storage-replicated
provisioner: linstor.csi.linbit.com
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
parameters:
linstor.csi.linbit.com/storagePool: pool1
linstor.csi.linbit.com/placementCount: "2"
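The two StorageClasses only provision volumes once LINSTOR has satellites and a `pool1` storage pool (the storage-pool block above is still commented out). A sketch for inspecting the LINSTOR state, assuming the operator exposes the controller as a Deployment named `linstor-controller` in the `piraeus-datastore` namespace:

```bash
kubectl -n piraeus-datastore get pods
# query LINSTOR through the controller pod
kubectl -n piraeus-datastore exec deploy/linstor-controller -- linstor node list
kubectl -n piraeus-datastore exec deploy/linstor-controller -- linstor storage-pool list
```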


@@ -0,0 +1,23 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: piraeus
namespace: flux-system
spec:
interval: 1h
install:
createNamespace: true
targetNamespace: piraeus-datastore
chart:
spec:
chart: charts/piraeus
version: 2.8.1
sourceRef:
kind: GitRepository
name: piraeus-repo
namespace: flux-system
interval: 60m
values:
fullnameOverride: piraeus
installCRDs: true
---


@@ -0,0 +1,9 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: stakater-repo
namespace: flux-system
spec:
interval: 1h
url: https://stakater.github.io/stakater-charts
---


@@ -0,0 +1,22 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: vault-secrets-webhook
namespace: flux-system
spec:
interval: 1h
url: oci://ghcr.io/bank-vaults/helm-charts/vault-secrets-webhook
ref:
tag: 1.21.4
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: vault-operator
namespace: flux-system
spec:
interval: 1h
url: https://github.com/bank-vaults/vault-operator.git
ref:
tag: v1.22.6

talos-k8s/README.md (new file, 108 lines)

@@ -0,0 +1,108 @@
# Talos
## What is Talos?
[Talos Linux](https://www.talos.dev/) is **Linux** designed for [Kubernetes](https://kubernetes.io/): secure, immutable, and minimal.
- Supports **cloud platforms, bare metal, and virtualization platforms**
- All **system management is done through an API**. There is no SSH, shell, or console
- Production proven: **it runs some of the largest Kubernetes clusters in the world**
- An open-source project from the [Sidero Labs](https://www.siderolabs.com/) team
ISO images for the amd64 and arm64 architectures are available on the [Talos releases page](https://github.com/siderolabs/talos/releases/).
[Getting started guide](https://www.talos.dev/v1.9/introduction/getting-started/)
[ISO image factory](https://factory.talos.dev/)
## Generation and installation
This part is done by the generator script, generate.sh:
```bash
talosctl gen secrets -o config/secrets.yaml
talosctl gen config --kubernetes-version 1.33.2 --with-secrets config/secrets.yaml talos-demo https://192.168.23.51:6443 --config-patch @patch.yaml
talosctl machineconfig patch config/controlplane.yaml --patch @cp1.patch --output config/cp1.yaml
talosctl machineconfig patch config/controlplane.yaml --patch @cp2.patch --output config/cp2.yaml
talosctl machineconfig patch config/controlplane.yaml --patch @cp3.patch --output config/cp3.yaml
talosctl machineconfig patch config/worker.yaml --patch @worker1.patch --output config/worker1.yaml
talosctl machineconfig patch config/worker.yaml --patch @worker2.patch --output config/worker2.yaml
```
This part we do ourselves:
```bash
talosctl apply-config --insecure -n 192.168.23.51 --file config/cp1.yaml
talosctl bootstrap --nodes 192.168.23.51 --endpoints 192.168.23.51 --talosconfig=config/talosconfig
talosctl apply-config --insecure -n 192.168.23.52 --file config/cp2.yaml
talosctl apply-config --insecure -n 192.168.23.53 --file config/cp3.yaml
talosctl apply-config --insecure -n 192.168.23.54 --file config/worker1.yaml
talosctl apply-config --insecure -n 192.168.23.55 --file config/worker2.yaml
talosctl kubeconfig ~/.kube/talos-demo.yaml --nodes 192.168.23.51 --endpoints 192.168.23.51 --talosconfig config/talosconfig
```
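After the configs are applied and etcd is bootstrapped, the cluster can be checked from both sides. Note that the nodes stay `NotReady` until Cilium is installed in the next section, because the machine config is generated with `cni: none`:

```bash
# health may not pass fully until the CNI from the next section is running
talosctl --talosconfig config/talosconfig health --nodes 192.168.23.51 --endpoints 192.168.23.51
kubectl --kubeconfig ~/.kube/talos-demo.yaml get nodes -o wide
```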
## Cilium
```bash
helm repo add cilium https://helm.cilium.io/
helm repo update
helm upgrade \
--install \
cilium \
cilium/cilium \
--version 1.17.5 \
--namespace kube-system \
--values cilium/values.yaml \
--set cluster.name=talos-demo \
--set cluster.id=1
kubectl apply -f cilium/ippool.yaml
kubectl apply -f cilium/l2-announcement-policy.yaml
```
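To confirm the CNI is up without any extra tooling, the DaemonSet status and node readiness are enough:

```bash
kubectl -n kube-system rollout status daemonset/cilium
kubectl -n kube-system get pods -l k8s-app=cilium -o wide
# nodes should move to Ready once the Cilium agents are running
kubectl get nodes
```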
## Re-Apply configs
```bash
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.51 --file config/cp1.yaml
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.52 --file config/cp2.yaml
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.53 --file config/cp3.yaml
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.54 --file config/worker1.yaml
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.55 --file config/worker2.yaml
```
## Re-Apply configs and reboot
```bash
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.51 --file config/cp1.yaml --mode=reboot
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.52 --file config/cp2.yaml --mode=reboot
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.53 --file config/cp3.yaml --mode=reboot
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.54 --file config/worker1.yaml --mode=reboot
talosctl --talosconfig config/talosconfig apply-config -n 192.168.23.55 --file config/worker2.yaml --mode=reboot
```
## Upgrade
```bash
IMAGE=factory.talos.dev/metal-installer/956b9107edd250304169d2e7a765cdd4e0c31f9097036e2e113b042e6c01bb98:v1.10.4
talosctl --talosconfig config/talosconfig upgrade --nodes 192.168.23.51 --image ${IMAGE}
talosctl --talosconfig config/talosconfig upgrade --nodes 192.168.23.52 --image ${IMAGE}
talosctl --talosconfig config/talosconfig upgrade --nodes 192.168.23.53 --image ${IMAGE}
talosctl --talosconfig config/talosconfig upgrade --nodes 192.168.23.54 --image ${IMAGE}
talosctl --talosconfig config/talosconfig upgrade --nodes 192.168.23.55 --image ${IMAGE}
```
## Upgrade k8s
```bash
talosctl --nodes 192.168.23.100 --talosconfig config/talosconfig upgrade-k8s --to 1.33.2
```
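A quick way to confirm that both the Talos and the Kubernetes upgrades landed on every node:

```bash
talosctl --talosconfig config/talosconfig version --nodes 192.168.23.51,192.168.23.52,192.168.23.53,192.168.23.54,192.168.23.55
# kubelet versions should report v1.33.2 after upgrade-k8s finishes
kubectl get nodes -o wide
```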


@@ -0,0 +1,7 @@
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
name: pool
spec:
blocks:
- cidr: 192.168.23.21/32


@@ -0,0 +1,16 @@
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
name: policy1
spec:
serviceSelector:
matchLabels:
color: blue
nodeSelector:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: DoesNotExist
interfaces:
- ^eth+
externalIPs: true
loadBalancerIPs: true


@@ -0,0 +1,35 @@
ipam:
mode: kubernetes
kubeProxyReplacement: true
securityContext:
capabilities:
ciliumAgent:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
cleanCiliumState:
- NET_ADMIN
- SYS_ADMIN
- SYS_RESOURCE
cgroup:
autoMount:
enabled: false
hostRoot: /sys/fs/cgroup
k8sServiceHost: 192.168.23.20
k8sServicePort: 6443
l2announcements:
enabled: true
devices: ens+
hubble:
relay:
enabled: true
ui:
enabled: true

talos-k8s/generate.sh (new executable file, 438 lines)

@@ -0,0 +1,438 @@
#!/bin/bash
### Defaults ###
IMAGE=factory.talos.dev/metal-installer/956b9107edd250304169d2e7a765cdd4e0c31f9097036e2e113b042e6c01bb98:v1.10.4
DEFAULT_K8S_VERSION=1.33.2
CONFIG_DIR="config"
# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Helper function for checking uniqueness in an array
contains_element () {
  local e match="$1"
  shift
  for e; do [[ "$e" == "$match" ]] && return 0; done
  return 1
}
# Function for asking yes/no questions
ask_yes_no() {
  local prompt="$1"
  local default="$2"
  local answer
  while true; do
    read -p "$prompt" answer
    answer=${answer:-$default}
    answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]') # convert to lowercase
    case "$answer" in
      y|yes)
        echo "y"
        return 0
        ;;
      n|no)
        echo "n"
        return 0
        ;;
      *)
        # send the warning to stderr so it is not captured by command substitution
        echo -e "${YELLOW}Invalid input. Enter 'y'/'yes' or 'n'/'no'.${NC}" >&2
        ;;
    esac
  done
}
echo -e "${GREEN}--- Interactive Talos K8s configurator ---${NC}"
# --- Questions for the user ---
# Cluster name
read -p "Enter the cluster name [talos-demo]: " CLUSTER_NAME
CLUSTER_NAME=${CLUSTER_NAME:-talos-demo}
# Kubernetes version
read -p "Enter the Kubernetes version [${DEFAULT_K8S_VERSION}]: " K8S_VERSION
K8S_VERSION=${K8S_VERSION:-${DEFAULT_K8S_VERSION}}
echo "K8S_VERSION: ${K8S_VERSION}"
# Network interface name
read -p "Enter the network interface name (e.g. ens18 or eth0) [ens18]: " INTERFACE_NAME
INTERFACE_NAME=${INTERFACE_NAME:-ens18}
# Number of control plane nodes
while true; do
  read -p "Enter the number of control plane nodes (odd, max 7) [1]: " CP_COUNT
  CP_COUNT=${CP_COUNT:-1}
  if (( CP_COUNT % 2 != 0 && CP_COUNT > 0 && CP_COUNT <= 7 )); then
    break
  else
    echo -e "${YELLOW}Invalid value. Enter an odd number from 1 to 7.${NC}"
  fi
done
# Number of worker nodes
while true; do
  read -p "Enter the number of worker nodes (max 15, min 0) [2]: " WORKER_COUNT
  WORKER_COUNT=${WORKER_COUNT:-2}
  if (( WORKER_COUNT >= 0 && WORKER_COUNT <= 15 )); then
    break
  else
    echo -e "${YELLOW}Invalid value. Enter a number from 0 to 15.${NC}"
  fi
done
# Shared gateway
while true; do
  read -p "Enter the shared gateway (e.g. 192.168.23.1): " GATEWAY
  if [[ -n "$GATEWAY" ]]; then
    USED_IPS+=("$GATEWAY")
    break
  else
    echo -e "${YELLOW}The gateway cannot be empty.${NC}"
  fi
done
# Network mask
while true; do
  read -p "Enter the network mask (e.g. 24) [24]: " NETMASK
  NETMASK=${NETMASK:-24}
  if [[ -n "$NETMASK" ]]; then
    break
  else
    echo -e "${YELLOW}The network mask cannot be empty.${NC}"
  fi
done
# DNS servers
while true; do
  read -p "Enter the first DNS server (e.g. 8.8.8.8) [8.8.8.8]: " DNS1
  DNS1=${DNS1:-8.8.8.8}
  if [[ -n "$DNS1" ]]; then
    break
  else
    echo -e "${YELLOW}The DNS server cannot be empty.${NC}"
  fi
done
while true; do
  read -p "Enter the second DNS server (e.g. 8.8.4.4) [8.8.4.4]: " DNS2
  DNS2=${DNS2:-8.8.4.4}
  if [[ -n "$DNS2" ]]; then
    break
  else
    echo -e "${YELLOW}The DNS server cannot be empty.${NC}"
  fi
done
# NTP servers
read -p "Enter the first NTP server [1.ru.pool.ntp.org]: " NTP1
NTP1=${NTP1:-1.ru.pool.ntp.org}
read -p "Enter the second NTP server [2.ru.pool.ntp.org]: " NTP2
NTP2=${NTP2:-2.ru.pool.ntp.org}
read -p "Enter the third NTP server [3.ru.pool.ntp.org]: " NTP3
NTP3=${NTP3:-3.ru.pool.ntp.org}
# VIP address
VIP_IP=""
USE_VIP="n"
if (( CP_COUNT > 1 )); then
  USE_VIP=$(ask_yes_no "Do you need a VIP address? (y/n) [y]: " "y")
  if [[ "$USE_VIP" == "y" ]]; then
    while true; do
      read -p "Enter the VIP address (e.g. 192.168.23.20): " VIP_IP
      if [[ -z "$VIP_IP" ]]; then
        echo -e "${YELLOW}The VIP address cannot be empty.${NC}"
        continue
      fi
      if contains_element "$VIP_IP" "${USED_IPS[@]}"; then
        echo -e "${YELLOW}This IP address is already in use. Enter a unique address.${NC}"
      else
        USED_IPS+=("$VIP_IP")
        break
      fi
    done
  fi
fi
# External load balancer
EXT_BALANCER_IP=""
USE_EXT_BALANCER=$(ask_yes_no "Do you need an external load balancer? (y/n) [n]: " "n")
if [[ "$USE_EXT_BALANCER" == "y" ]]; then
  while true; do
    read -p "Enter the IP address of the external load balancer: " EXT_BALANCER_IP_INPUT
    if [[ -z "$EXT_BALANCER_IP_INPUT" ]]; then
      echo -e "${YELLOW}The IP address cannot be empty.${NC}"
      continue
    fi
    if contains_element "$EXT_BALANCER_IP_INPUT" "${USED_IPS[@]}"; then
      echo -e "${YELLOW}This IP address is already in use. Enter a unique address.${NC}"
    else
      EXT_BALANCER_IP=$EXT_BALANCER_IP_INPUT
      USED_IPS+=("$EXT_BALANCER_IP")
      break
    fi
  done
fi
# Disk
read -p "Enter the disk for the worker nodes (e.g. /dev/sda) [/dev/sda]: " DISK
DISK=${DISK:-/dev/sda}
# drbd support
USE_DRBD=$(ask_yes_no "Do you need drbd support? (y/n) [y]: " "y")
# --- Generate configuration files ---
mkdir -p "$CONFIG_DIR"
PATCH_FILE="$CONFIG_DIR/patch.yaml"
# --- Create patch.yaml ---
# This approach avoids sed and works the same way on macOS and Linux.
# Write the first part of the file
cat > "$PATCH_FILE" << EOF
machine:
EOF
# Add certSANs if there is an external load balancer
if [[ -n "$EXT_BALANCER_IP" ]]; then
  cat >> "$PATCH_FILE" << EOF
  certSANs:
    - ${EXT_BALANCER_IP}
EOF
fi
# Add the kernel block for drbd if needed
if [[ "$USE_DRBD" == "y" ]] && (( WORKER_COUNT == 0 )); then
  cat >> "$PATCH_FILE" << 'EOF'
  kernel:
    modules:
      - name: drbd
        parameters:
          - usermode_helper=disabled
EOF
fi
# Add the main part of machine and the beginning of cluster
cat >> "$PATCH_FILE" << EOF
  network:
    nameservers:
      - ${DNS1}
      - ${DNS2}
  install:
    disk: ${DISK}
    image: ${IMAGE}
  time:
    servers:
      - ${NTP1}
      - ${NTP2}
      - ${NTP3}
cluster:
EOF
# Add allowSchedulingOnControlPlanes and the final part
if (( WORKER_COUNT == 0 )); then
  echo -e "\n${YELLOW}No workers. Allowing pods to be scheduled on the control plane...${NC}"
  cat >> "$PATCH_FILE" << EOF
  allowSchedulingOnControlPlanes: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
EOF
else
  cat >> "$PATCH_FILE" << EOF
  network:
    cni:
      name: none
  proxy:
    disabled: true
EOF
fi
CP_IPS=()
# Generate patches for the control plane nodes
echo -e "\n${GREEN}--- Control plane node setup ---${NC}"
for i in $(seq 1 $CP_COUNT); do
  while true; do
    read -p "Enter the IP address for control plane $i (e.g. 192.168.23.5${i}): " CP_IP
    if [[ -z "$CP_IP" ]]; then
      echo -e "${YELLOW}The IP address cannot be empty.${NC}"
      continue
    fi
    if contains_element "$CP_IP" "${USED_IPS[@]}"; then
      echo -e "${YELLOW}This IP address is already in use. Enter a unique address.${NC}"
    else
      CP_IPS+=("$CP_IP")
      USED_IPS+=("$CP_IP")
      break
    fi
  done
  HOSTNAME="cp-$i"
  FILENAME="$CONFIG_DIR/cp$i.patch"
  # Create the base patch
  cat > "$FILENAME" << EOF
machine:
  network:
    hostname: $HOSTNAME
    interfaces:
      - interface: $INTERFACE_NAME
        dhcp: false
        addresses:
          - $CP_IP/$NETMASK
EOF
  # Add the VIP if it is used
  if [[ "$USE_VIP" == "y" && -n "$VIP_IP" ]]; then
    cat >> "$FILENAME" << EOF
        vip:
          ip: $VIP_IP
EOF
  fi
  # Add routes
  cat >> "$FILENAME" << EOF
        routes:
          - network: 0.0.0.0/0
            gateway: $GATEWAY
EOF
  echo "Created file: $FILENAME"
done
# Generate patches for the worker nodes
if (( WORKER_COUNT > 0 )); then
  echo -e "\n${GREEN}--- Worker node setup ---${NC}"
  for i in $(seq 1 $WORKER_COUNT); do
    while true; do
      read -p "Enter the IP address for worker $i (e.g. 192.168.23.10${i}): " WORKER_IP
      if [[ -z "$WORKER_IP" ]]; then
        echo -e "${YELLOW}The IP address cannot be empty.${NC}"
        continue
      fi
      if contains_element "$WORKER_IP" "${USED_IPS[@]}"; then
        echo -e "${YELLOW}This IP address is already in use. Enter a unique address.${NC}"
      else
        USED_IPS+=("$WORKER_IP")
        break
      fi
    done
    HOSTNAME="worker-$i"
    FILENAME="$CONFIG_DIR/worker$i.patch"
    cat > "$FILENAME" << EOF
machine:
  network:
    hostname: $HOSTNAME
    interfaces:
      - deviceSelector:
          physical: true
        dhcp: false
        addresses:
          - $WORKER_IP/$NETMASK
        routes:
          - network: 0.0.0.0/0
            gateway: $GATEWAY
EOF
    # Add drbd if selected
    if [[ "$USE_DRBD" == "y" ]]; then
      cat >> "$FILENAME" << EOF
  kernel:
    modules:
      - name: drbd
        parameters:
          - usermode_helper=disabled
EOF
    fi
    echo "Created file: $FILENAME"
  done
fi
# --- Run the final commands ---
echo -e "\n${YELLOW}--------------------------------------------------${NC}"
echo -e "${GREEN}Configuration complete. Assembling the files:${NC}"
echo -e "${YELLOW}--------------------------------------------------${NC}"
# Generate secrets
talosctl gen secrets -o $CONFIG_DIR/secrets.yaml
# Determine the endpoint
ENDPOINT_IP=""
if [[ "$USE_VIP" == "y" && -n "$VIP_IP" ]]; then
  ENDPOINT_IP=$VIP_IP
else
  FIRST_CP_FULL_IP=${CP_IPS[0]}
  ENDPOINT_IP=$(echo "$FIRST_CP_FULL_IP" | cut -d'/' -f1)
fi
# Generate the main configuration
cd $CONFIG_DIR
echo "talosctl gen config --kubernetes-version $K8S_VERSION --with-secrets secrets.yaml $CLUSTER_NAME https://${ENDPOINT_IP}:6443 --config-patch @patch.yaml"
talosctl gen config --kubernetes-version $K8S_VERSION --with-secrets secrets.yaml $CLUSTER_NAME https://${ENDPOINT_IP}:6443 --config-patch @patch.yaml
# Apply the patches to the control plane configs
for i in $(seq 1 $CP_COUNT); do
  talosctl machineconfig patch controlplane.yaml --patch @cp$i.patch --output cp$i.yaml
  echo "Created file: $CONFIG_DIR/cp$i.yaml"
done
# Apply the patches to the worker configs
if (( WORKER_COUNT > 0 )); then
  for i in $(seq 1 $WORKER_COUNT); do
    talosctl machineconfig patch worker.yaml --patch @worker$i.patch --output worker$i.yaml
    echo "Created file: $CONFIG_DIR/worker$i.yaml"
  done
fi
# Update talosconfig with the endpoints
echo -e "\n${GREEN}--- Updating talosconfig ---${NC}"
# Build the endpoints array
ENDPOINTS=()
# Add all control plane IPs
for cp_ip in "${CP_IPS[@]}"; do
  ENDPOINTS+=("$cp_ip")
done
# Add the VIP if present
if [[ "$USE_VIP" == "y" && -n "$VIP_IP" ]]; then
  ENDPOINTS+=("$VIP_IP")
fi
# Add the external load balancer if present
if [[ "$USE_EXT_BALANCER" == "y" && -n "$EXT_BALANCER_IP" ]]; then
  ENDPOINTS+=("$EXT_BALANCER_IP")
fi
# Join the endpoints with commas
ENDPOINTS_STRING=$(IFS="," ; echo "${ENDPOINTS[*]}")
# Update talosconfig
if [[ -f "talosconfig" ]]; then
  TMP_CONFIG=$(mktemp)
  while IFS= read -r line; do
    if [[ "$line" == *"endpoints: []"* ]]; then
      echo "${line/endpoints: []/endpoints: [$ENDPOINTS_STRING]}" >> "$TMP_CONFIG"
    else
      echo "$line" >> "$TMP_CONFIG"
    fi
  done < "talosconfig"
  mv "$TMP_CONFIG" "talosconfig"
  echo -e "${GREEN}Updated talosconfig with endpoints: [$ENDPOINTS_STRING]${NC}"
else
  echo -e "${YELLOW}talosconfig file not found${NC}"
fi
cd ..
echo "Script finished"
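The script is interactive and writes everything into `config/`. A typical session and the follow-up apply step look like this (a sketch; the IPs are whatever was entered at the prompts):

```bash
./generate.sh
ls config/   # secrets.yaml, controlplane.yaml, worker.yaml, talosconfig, cp*.yaml, worker*.yaml
# then apply the generated configs exactly as in the README
talosctl apply-config --insecure -n 192.168.23.51 --file config/cp1.yaml
talosctl bootstrap --nodes 192.168.23.51 --endpoints 192.168.23.51 --talosconfig=config/talosconfig
```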

talos-k8s/run-stand.sh (new executable file, 52 lines)

@@ -0,0 +1,52 @@
#!/bin/bash
VERB=${1:-"create"}
nodeip=pve1-nsk.bildme.ru
nodeid=pve1-nsk
poolid=test-vm
vmid_template=777
vmid=80
vm_name=t8s-demo
controllers=3
workers=2
echo "${VERB} stand"
[[ $VERB == "delete" ]] && {
for i in $(seq 1 "$controllers")
do
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid}${i}/status/stop"
ssh root@${nodeip} "pvesh delete /nodes/${nodeid}/qemu/${vmid}${i}"
done
for i in $(seq 1 "$workers")
do
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid}$((i + 4))/status/stop"
ssh root@${nodeip} "pvesh delete /nodes/${nodeid}/qemu/${vmid}$((i + 4))"
done
cd config
rm talosconfig cp1.yaml cp2.yaml cp3.yaml secrets.yaml controlplane.yaml worker.yaml
cd ..
exit 0
}
[[ $VERB == "create" ]] && {
for i in $(seq 1 "$controllers")
do
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid_template}/clone --newid ${vmid}${i} --full false --name ${vm_name}-cp-${i}"
ssh root@${nodeip} "pvesh set /nodes/${nodeid}/qemu/${vmid}${i}/config --cores 2 --vcpus 2 --memory 6144"
ssh root@${nodeip} "pvesh set /pools/${poolid} --vms "${vmid}${i}""
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid}${i}/status/start"
done
for i in $(seq 1 "$workers")
do
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid_template}/clone --newid ${vmid}$((i + 4)) --full false --name ${vm_name}-w-${i}"
ssh root@${nodeip} "pvesh set /nodes/${nodeid}/qemu/${vmid}$((i + 4))/config --cores 2 --vcpus 4 --memory 12288"
ssh root@${nodeip} "pvesh set /pools/${poolid} --vms "${vmid}$((i + 4))""
ssh root@${nodeip} "pvesh create /nodes/${nodeid}/qemu/${vmid}$((i + 4))/status/start"
done
}
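Usage is driven by the first argument, which defaults to `create` (assumes passwordless SSH to the Proxmox host and that template VM 777 already exists):

```bash
./run-stand.sh create   # clone VMs 801-803 (control planes) and 805-806 (workers) from template 777 and start them
./run-stand.sh delete   # stop and delete the same VMs and remove the generated Talos configs
```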