From 9c63fb579b344fe0bbe155d42dda8716358e6379 Mon Sep 17 00:00:00 2001
From: Vassiliy Yegorov
Date: Tue, 23 Aug 2022 17:15:45 +0700
Subject: [PATCH] add external vault

---
 docs/external-vault-cluster.md  |   93 +++
 helm/vault/values-external.yaml | 1010 +++++++++++++++++++++++++++++++
 k8s/vault-test.yaml             |    6 +-
 3 files changed, 1106 insertions(+), 3 deletions(-)
 create mode 100644 docs/external-vault-cluster.md
 create mode 100644 helm/vault/values-external.yaml

diff --git a/docs/external-vault-cluster.md b/docs/external-vault-cluster.md
new file mode 100644
index 0000000..3b70f6e
--- /dev/null
+++ b/docs/external-vault-cluster.md
@@ -0,0 +1,93 @@
+# Connecting an external Vault cluster to Kubernetes
+
+1. In the Helm values, set:
+
+```yaml
+injector:
+  externalVaultAddr: "https://vault.bildme.ru"
+server:
+  enabled: false
+```
+
+2. Check the injector's service account token:
+
+```bash
+kubectl describe serviceaccount -n vault vault
+```
+
+3. Only if the cluster runs Kubernetes >= 1.24 (on older versions, check that
+   the `vault` token secret exists and skip this step):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: vault-token-g955r
+  namespace: vault
+  annotations:
+    kubernetes.io/service-account.name: vault
+type: kubernetes.io/service-account-token
+```
+
+4. Capture the secret name in a variable (or just look it up in Lens)
+   * make sure the `jq` utility is installed
+
+```bash
+VAULT_HELM_SECRET_NAME=$(kubectl get secrets -n vault --output=json | jq -r '.items[].metadata | select(.name|startswith("vault-token-")).name')
+```
+
+## Configure the Kubernetes auth method in Vault
+
+1. `vault auth enable kubernetes`
+2. Extract the JWT from our token:
+
+`TOKEN_REVIEW_JWT=$(kubectl get secret -n vault $VAULT_HELM_SECRET_NAME --output='go-template={{ .data.token }}' | base64 --decode)`
+
+3. Pull the CA certificate out of the cluster:
+
+`KUBE_CA_CERT=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.certificate-authority-data}' | base64 --decode)`
+
+4. And get the external address of the cluster itself:
+
+`KUBE_HOST=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.server}')`
+
+5. Fill in Vault's config for connecting to the cluster:
+
+```bash
+vault write auth/kubernetes/config \
+    token_reviewer_jwt="$TOKEN_REVIEW_JWT" \
+    kubernetes_host="$KUBE_HOST" \
+    kubernetes_ca_cert="$KUBE_CA_CERT" \
+    issuer="https://kubernetes.default.svc.cluster.local"
+```
+
+## Test it on a deployment
+
+1. Create a policy granting access to the specific secret:
+
+```bash
+vault policy write k8s-policy - <<EOF
+path "secret/data/k8s/config" {
+  capabilities = ["read"]
+}
+EOF
+```
+
+2. Create a role that binds the policy to the service account of the test
+   deployment (adjust the service account name and namespace to your workload):
+
+```bash
+vault write auth/kubernetes/role/k8s-role \
+    bound_service_account_names=vault-test \
+    bound_service_account_namespaces=default \
+    policies=k8s-policy \
+    ttl=24h
+```
+
+3. Roll out the test deployment from `k8s/vault-test.yaml` and check that the
+   injector rendered the secret:
+
+```bash
+kubectl apply -f k8s/vault-test.yaml
+kubectl exec deploy/vault-test -- cat /vault/secrets/credentials
+```
diff --git a/helm/vault/values-external.yaml b/helm/vault/values-external.yaml
new file mode 100644
--- /dev/null
+++ b/helm/vault/values-external.yaml
@@ -0,0 +1,1010 @@
+  # extraVolumes is a list of extra volumes to mount. These will be exposed
+  # to Vault in the path `/vault/userconfig/<name>/`. The value below is
+  # an array of objects, examples are shown below.
+  extraVolumes: []
+  # - type: secret (or "configMap")
+  #   name: my-secret
+  #   path: null # default is `/vault/userconfig`
+
+  # volumes is a list of volumes made available to all containers. These are rendered
+  # via toYaml rather than pre-processed like the extraVolumes value.
+  # The purpose is to make it easy to share volumes between containers.
+  volumes: null
+  # - name: plugins
+  #   emptyDir: {}
+
+  # volumeMounts is a list of volumeMounts for the main server container. These are rendered
+  # via toYaml rather than pre-processed like the extraVolumes value.
+  # The purpose is to make it easy to share volumes between containers.
+  volumeMounts: null
+  # - mountPath: /usr/local/libexec/vault
+  #   name: plugins
+  #   readOnly: true
+
+  # Affinity Settings
+  # Commenting out or setting as empty the affinity variable, will allow
+  # deployment to single node services such as Minikube
+  # This should be either a multi-line string or YAML matching the PodSpec's affinity field.
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app.kubernetes.io/name: {{ template "vault.name" . }}
+              app.kubernetes.io/instance: "{{ .Release.Name }}"
+              component: server
+          topologyKey: kubernetes.io/hostname
+
+  # Topology settings for server pods
+  # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+  # This should be either a multi-line string or YAML matching the topologySpreadConstraints array
+  # in a PodSpec.
+  topologySpreadConstraints: []
+
+  # Toleration Settings for server pods
+  # This should be either a multi-line string or YAML matching the Toleration array
+  # in a PodSpec.
+  tolerations: []
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector:
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: {}
+
+  # Enables network policy for server pods
+  networkPolicy:
+    enabled: false
+    egress: []
+    # egress:
+    # - to:
+    #   - ipBlock:
+    #       cidr: 10.0.0.0/24
+    #   ports:
+    #   - protocol: TCP
+    #     port: 443
+
+  # Priority class for server pods
+  priorityClassName: ""
+
+  # Extra labels to attach to the server pods
+  # This should be a YAML map of the labels to apply to the server pods
+  extraLabels: {}
+
+  # Extra annotations to attach to the server pods
+  # This can either be YAML or a YAML-formatted multi-line templated string map
+  # of the annotations to apply to the server pods
+  annotations: {}
+
+  # Enables a headless service to be used by the Vault Statefulset
+  service:
+    enabled: true
+    # clusterIP controls whether a Cluster IP address is attached to the
+    # Vault service within Kubernetes. By default the Vault service will
+    # be given a Cluster IP address, set to None to disable. When disabled
+    # Kubernetes will create a "headless" service. Headless services can be
+    # used to communicate with pods directly through DNS instead of a round robin
+    # load balancer.
+    # clusterIP: None
+
+    # Configures the service type for the main Vault service. Can be ClusterIP
+    # or NodePort.
+    #type: ClusterIP
+
+    # Do not wait for pods to be ready
+    publishNotReadyAddresses: true
+
+    # The externalTrafficPolicy can be set to either Cluster or Local
+    # and is only valid for LoadBalancer and NodePort service types.
+    # The default value is Cluster.
+    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
+    externalTrafficPolicy: Cluster
+
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #nodePort: 30000
+
+    # When HA mode is enabled
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #activeNodePort: 30001
+
+    # When HA mode is enabled
+    # If type is set to "NodePort", a specific nodePort value can be configured,
+    # will be random if left blank.
+    #standbyNodePort: 30002
+
+    # Port on which Vault server is listening
+    port: 8200
+    # Target port to which the service should be mapped to
+    targetPort: 8200
+    # Extra annotations for the service definition. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the service.
+    annotations: {}
+
+  # This configures the Vault Statefulset to create a PVC for data
+  # storage when using the file or raft backend storage engines.
+  # See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
+  dataStorage:
+    enabled: true
+    # Size of the PVC created
+    size: 1Gi
+    # Location where the PVC will be mounted.
+    mountPath: "/vault/data"
+    # Name of the storage class to use. If null it will use the
+    # configured default Storage Class.
+    storageClass: null
+    # Access Mode of the storage device being used for the PVC
+    accessMode: ReadWriteOnce
+    # Annotations to apply to the PVC
+    annotations: {}
+
+  # This configures the Vault Statefulset to create a PVC for audit
+  # logs. Once Vault is deployed, initialized and unsealed, Vault must
+  # be configured to use this for audit logs. This will be mounted to
+  # /vault/audit
+  # See https://www.vaultproject.io/docs/audit/index.html to know more
+  auditStorage:
+    enabled: false
+    # Size of the PVC created
+    size: 10Gi
+    # Location where the PVC will be mounted.
+    mountPath: "/vault/audit"
+    # Name of the storage class to use. If null it will use the
+    # configured default Storage Class.
+    storageClass: null
+    # Access Mode of the storage device being used for the PVC
+    accessMode: ReadWriteOnce
+    # Annotations to apply to the PVC
+    annotations: {}
+
+  # Run Vault in "dev" mode. This requires no further setup, no state management,
+  # and no initialization. This is useful for experimenting with Vault without
+  # needing to unseal, store keys, et. al. All data is lost on restart - do not
+  # use dev mode for anything other than experimenting.
+  # See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
+  dev:
+    enabled: false
+
+    # Set VAULT_DEV_ROOT_TOKEN_ID value
+    devRootToken: "root"
+
+  # Run Vault in "standalone" mode. This is the default mode that will deploy if
+  # no arguments are given to helm. This requires a PVC for data storage to use
+  # the "file" backend. This mode is not highly available and should not be scaled
+  # past a single replica.
+  standalone:
+    enabled: "-"
+
+    # config is a raw string of default configuration when using a Stateful
+    # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
+    # and store data there. This is only used when using a Replica count of 1, and
+    # using a stateful set. This should be HCL.
+
+    # Note: Configuration files are stored in ConfigMaps so sensitive data
+    # such as passwords should be either mounted through extraSecretEnvironmentVars
+    # or through a Kube secret. For more information see:
+    # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
+    config: |
+      ui = true
+
+      listener "tcp" {
+        tls_disable = 1
+        address = "[::]:8200"
+        cluster_address = "[::]:8201"
+      }
+      storage "file" {
+        path = "/vault/data"
+      }
+
+      # Example configuration for using auto-unseal, using Google Cloud KMS. The
+      # GKMS keys must already exist, and the cluster must have a service account
+      # that is authorized to access GCP KMS.
+ #seal "gcpckms" { + # project = "vault-helm-dev" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # Run Vault in "HA" mode. There are no storage requirements unless audit log + # persistence is required. In HA mode Vault will configure itself to use Consul + # for its storage backend. The default configuration provided will work the Consul + # Helm project by default. It is possible to manually configure Vault to use a + # different HA backend. + ha: + enabled: true + replicas: 3 + + # Set the api_addr configuration for Vault HA + # See https://www.vaultproject.io/docs/configuration#api_addr + # If set to null, this will be set to the Pod IP Address + apiAddr: null + + # Set the cluster_addr confuguration for Vault HA + # See https://www.vaultproject.io/docs/configuration#cluster_addr + # If set to null, this will be set to https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201 + clusterAddr: null + + # Enables Vault's integrated Raft storage. Unlike the typical HA modes where + # Vault's persistence is external (such as Consul), enabling Raft mode will create + # persistent volumes for Vault to store data according to the configuration under server.dataStorage. + # The Vault cluster will coordinate leader elections and failovers internally. + raft: + + # Enables Raft integrated storage + enabled: true + # Set the Node Raft ID to the name of the pod + setNodeId: false + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + + storage "raft" { + path = "/vault/data" + } + + service_registration "kubernetes" {} + + # config is a raw string of default configuration when using a Stateful + # deployment. Default is to use a Consul for its HA storage backend. + # This should be HCL. + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "consul" { + path = "vault" + address = "HOST_IP:8500" + } + + service_registration "kubernetes" {} + + # Example configuration for using auto-unseal, using Google Cloud KMS. The + # GKMS keys must already exist, and the cluster must have a service account + # that is authorized to access GCP KMS. + #seal "gcpckms" { + # project = "vault-helm-dev-246514" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + disruptionBudget: + enabled: true + + # maxUnavailable will default to (n/2)-1 where n is the number of + # replicas. If you'd like a custom value, you can specify an override here. + maxUnavailable: null + + # Definition of the serviceAccount used to run Vault. 
+  # These options are also used when using an external Vault server to validate
+  # Kubernetes tokens.
+  serviceAccount:
+    # Specifies whether a service account should be created
+    create: true
+    # The name of the service account to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name: ""
+    # Extra annotations for the serviceAccount definition. This can either be
+    # YAML or a YAML-formatted multi-line templated string map of the
+    # annotations to apply to the serviceAccount.
+    annotations: {}
+
+  # Settings for the statefulSet used to run Vault.
+  statefulSet:
+    # Extra annotations for the statefulSet. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the statefulSet.
+    annotations: {}
+
+  # Set the pod and container security contexts.
+  # If not set, these will default to, and for *not* OpenShift:
+  # pod:
+  #   runAsNonRoot: true
+  #   runAsGroup: {{ .Values.server.gid | default 1000 }}
+  #   runAsUser: {{ .Values.server.uid | default 100 }}
+  #   fsGroup: {{ .Values.server.gid | default 1000 }}
+  # container: {}
+  #
+  # If not set, these will default to, and for OpenShift:
+  # pod: {}
+  # container:
+  #   allowPrivilegeEscalation: false
+  securityContext:
+    pod: {}
+    container: {}
+
+
+# Vault UI
+ui:
+  # True if you want to create a Service entry for the Vault UI.
+  #
+  # serviceType can be used to control the type of service created. For
+  # example, setting this to "LoadBalancer" will create an external load
+  # balancer (for supported K8S installations) to access the UI.
+  enabled: false
+  publishNotReadyAddresses: true
+  # The service should only contain selectors for active Vault pod
+  activeVaultPodOnly: false
+  serviceType: "ClusterIP"
+  serviceNodePort: null
+  externalPort: 8200
+  targetPort: 8200
+
+  # The externalTrafficPolicy can be set to either Cluster or Local
+  # and is only valid for LoadBalancer and NodePort service types.
+  # The default value is Cluster.
+  # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy
+  externalTrafficPolicy: Cluster
+
+  #loadBalancerSourceRanges:
+  #   - 10.0.0.0/16
+  #   - 1.78.23.3/32
+
+  # loadBalancerIP:
+
+  # Extra annotations to attach to the ui service
+  # This can either be YAML or a YAML-formatted multi-line templated string map
+  # of the annotations to apply to the ui service
+  annotations: {}
+
+# secrets-store-csi-driver-provider-vault
+csi:
+  # True if you want to install a secrets-store-csi-driver-provider-vault daemonset.
+  #
+  # Requires installing the secrets-store-csi-driver separately, see:
+  # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver
+  #
+  # With the driver and provider installed, you can mount Vault secrets into volumes
+  # similar to the Vault Agent injector, and you can also sync those secrets into
+  # Kubernetes secrets.
+  enabled: false
+
+  image:
+    repository: "hashicorp/vault-csi-provider"
+    tag: "1.2.0"
+    pullPolicy: IfNotPresent
+
+  # volumes is a list of volumes made available to all containers. These are rendered
+  # via toYaml rather than pre-processed like the extraVolumes value.
+  # The purpose is to make it easy to share volumes between containers.
+  volumes: null
+  # - name: tls
+  #   secret:
+  #     secretName: vault-tls
+
+  # volumeMounts is a list of volumeMounts for the main server container. These are rendered
+  # via toYaml rather than pre-processed like the extraVolumes value.
+  # The purpose is to make it easy to share volumes between containers.
+  volumeMounts: null
+  # - name: tls
+  #   mountPath: "/vault/tls"
+  #   readOnly: true
+
+  resources: {}
+  # resources:
+  #   requests:
+  #     cpu: 50m
+  #     memory: 128Mi
+  #   limits:
+  #     cpu: 50m
+  #     memory: 128Mi
+
+  # Settings for the daemonSet used to run the provider.
+  daemonSet:
+    updateStrategy:
+      type: RollingUpdate
+      maxUnavailable: ""
+    # Extra annotations for the daemonSet. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the daemonSet.
+    annotations: {}
+    # Provider host path (must match the CSI provider's path)
+    providersDir: "/etc/kubernetes/secrets-store-csi-providers"
+    # Kubelet host path
+    kubeletRootDir: "/var/lib/kubelet"
+    # Extra labels to attach to the vault-csi-provider daemonSet
+    # This should be a YAML map of the labels to apply to the csi provider daemonSet
+    extraLabels: {}
+    # security context for the pod template and container in the csi provider daemonSet
+    securityContext:
+      pod: {}
+      container: {}
+
+  pod:
+    # Extra annotations for the provider pods. This can either be YAML or a
+    # YAML-formatted multi-line templated string map of the annotations to apply
+    # to the pod.
+    annotations: {}
+
+    # Toleration Settings for provider pods
+    # This should be either a multi-line string or YAML matching the Toleration array
+    # in a PodSpec.
+    tolerations: []
+
+    # Extra labels to attach to the vault-csi-provider pod
+    # This should be a YAML map of the labels to apply to the csi provider pod
+    extraLabels: {}
+
+  # Priority class for csi pods
+  priorityClassName: ""
+
+  serviceAccount:
+    # Extra annotations for the serviceAccount definition. This can either be
+    # YAML or a YAML-formatted multi-line templated string map of the
+    # annotations to apply to the serviceAccount.
+    annotations: {}
+
+    # Extra labels to attach to the vault-csi-provider serviceAccount
+    # This should be a YAML map of the labels to apply to the csi provider serviceAccount
+    extraLabels: {}
+
+  # Used to configure readinessProbe for the pods.
+  readinessProbe:
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 5
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+  # Used to configure livenessProbe for the pods.
+  livenessProbe:
+    # When a probe fails, Kubernetes will try failureThreshold times before giving up
+    failureThreshold: 2
+    # Number of seconds after the container has started before probe initiates
+    initialDelaySeconds: 5
+    # How often (in seconds) to perform the probe
+    periodSeconds: 5
+    # Minimum consecutive successes for the probe to be considered successful after having failed
+    successThreshold: 1
+    # Number of seconds after which the probe times out.
+    timeoutSeconds: 3
+
+  # Enables debug logging.
+  debug: false
+
+  # Pass arbitrary additional arguments to vault-csi-provider.
+  # See https://www.vaultproject.io/docs/platform/k8s/csi/configurations#command-line-arguments
+  # for the available command line flags.
+  extraArgs: []
diff --git a/k8s/vault-test.yaml b/k8s/vault-test.yaml
index 5af468d..d69a204 100644
--- a/k8s/vault-test.yaml
+++ b/k8s/vault-test.yaml
@@ -29,10 +29,10 @@ spec:
         app: vault-test
       annotations:
         vault.hashicorp.com/agent-inject: 'true'
-        vault.hashicorp.com/role: 'vault-test'
-        vault.hashicorp.com/agent-inject-secret-credentials: 'kv/secret/vault-test'
+        vault.hashicorp.com/role: 'k8s-role'
+        vault.hashicorp.com/agent-inject-secret-credentials: 'secret/k8s/config'
         vault.hashicorp.com/agent-inject-template-credentials: |
-          {{- with secret "kv/secret/vault-test" -}}
+          {{- with secret "secret/k8s/config" -}}
           postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@{{ .Data.data.psqlhost }}:5432/{{ .Data.data.database }}
           {{- end -}}
     spec:
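
The patch itself never shows how `values-external.yaml` gets applied. A minimal sketch, assuming `helm/vault` holds a copy of the `hashicorp/vault` chart and the release goes into the `vault` namespace (both are assumptions, not stated in the diff):

```bash
# Install (or upgrade) the injector-only release pointed at the external
# Vault cluster; with server.enabled=false no Vault server pods run in-cluster.
helm upgrade --install vault ./helm/vault \
    -f ./helm/vault/values-external.yaml \
    -n vault --create-namespace
```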
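
For the agent template in `k8s/vault-test.yaml` to render, the KV v2 secret at `secret/k8s/config` has to exist before the test deployment starts. A sketch of seeding it; the four field names come from the template (`.Data.data.username` and friends), but every value below is a hypothetical placeholder:

```bash
# Write the KV v2 secret consumed by the injected template. Replace the
# placeholder values with your real PostgreSQL credentials.
vault kv put secret/k8s/config \
    username=vault-test \
    password=change-me \
    psqlhost=postgres.example.com \
    database=vault-test
```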