This commit is contained in:
Vassiliy Yegorov
2021-05-06 15:02:42 +07:00
commit 05b0d36bb3
16 changed files with 260 additions and 0 deletions

4
Cluster/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
# RKE cluster state file — contains credentials/certificates; never commit.
cluster.rkestate
# Kubeconfig generated by `rke up` — grants admin access to the cluster.
kube_config_cluster.yml
# Locally generated TLS private keys and certificates.
tls/*.key
tls/*.crt

4
Cluster/0-docker.sh Executable file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Install Docker on a node using Docker's official convenience script.
# Run this on ALL cluster nodes before provisioning with rke.
set -euo pipefail

# Refresh the package index and make sure curl is present.
apt update && apt install -y curl

# -f: fail on HTTP errors instead of saving an error page (which would then
#     be executed by `sh install.sh` below);
# -sS: quiet, but still print errors; -L: follow redirects.
curl -fsSL https://get.docker.com -o install.sh
sh install.sh

7
Cluster/1-rke.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Provision the Kubernetes cluster described in ./cluster.yml using RKE.
# Prerequisite: the rke binary must be installed, e.g.:
# brew install rke
# https://rancher.com/docs/rke/latest/en/installation/
# create cluster
# `rke up` reads cluster.yml from the current directory and writes
# kube_config_cluster.yml and cluster.rkestate alongside it.
rke up

10
Cluster/2-add-config.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Install the kubeconfig generated by `rke up` into ~/.kube and verify access.
set -euo pipefail

# Fresh machines have no ~/.kube yet — the redirect below would fail without it.
mkdir -p ~/.kube

# add config file
cat kube_config_cluster.yml > ~/.kube/k8s-hls

# Build KUBECONFIG from every regular file directly under ~/.kube.
# NOTE: an export only affects THIS script's process; to make it stick in your
# interactive shell, run `source ./2-add-config.sh` or add the export to your
# shell profile.
KUBECONFIG=$(find ~/.kube -maxdepth 1 -type f | tr '\n' ':')
export KUBECONFIG

# test auth to cluster
kubectl get pods --all-namespaces

25
Cluster/3-init-certmanager.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Install cert-manager v1.3.1 via Helm, then create the Let's Encrypt issuer.
set -euo pipefail

CM_VERSION=1.3.1

# Install the CustomResourceDefinition resources separately
kubectl apply -f "https://github.com/jetstack/cert-manager/releases/download/v${CM_VERSION}/cert-manager.crds.yaml"

# Create the namespace for cert-manager.
# `kubectl create` alone hard-fails if the namespace already exists, which
# breaks re-runs; render with --dry-run and `apply` to stay idempotent.
kubectl create namespace cert-manager --dry-run=client -o yaml | kubectl apply -f -

# Add the Jetstack Helm repository
helm repo add cert-manager https://charts.jetstack.io
# Update your local Helm chart repository cache
helm repo update

helm upgrade --install \
  cert-manager \
  cert-manager/cert-manager \
  --namespace cert-manager \
  --version "${CM_VERSION}"

# Wait for both the controller AND the webhook — the webhook must be serving
# before the ClusterIssuer can be admitted, or the apply below is rejected.
kubectl -n cert-manager rollout status deploy/cert-manager
kubectl -n cert-manager rollout status deploy/cert-manager-webhook
sleep 5
kubectl apply -f certmanager/orc-letsencrypt-issuer.yaml

View File

@@ -0,0 +1,15 @@
---
# Cluster-wide Let's Encrypt (production) ACME issuer.
# Reference it from Ingress resources via the annotation:
#   cert-manager.io/cluster-issuer: orc-letsencrypt-issuer
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: orc-letsencrypt-issuer
spec:
  acme:
    # Production endpoint — rate-limited; switch to the staging URL for tests.
    server: https://acme-v02.api.letsencrypt.org/directory
    email: vasyakrg@gmail.com
    privateKeySecretRef:
      # Secret holding the ACME account private key (auto-created).
      name: orc-letsencrypt-private-key
    solvers:
      # Empty selector: this HTTP-01 solver handles every certificate request.
      - selector: {}
        http01:
          ingress:
            class: nginx

64
Cluster/cluster.yml Normal file
View File

@@ -0,0 +1,64 @@
---
# RKE cluster definition: three nodes, each running control plane, etcd
# and workloads (HA control plane, no dedicated workers).
cluster_name: k8s-cluster
name: k8s-cluster
enable_cluster_alerting: false
enable_cluster_monitoring: false
ignore_docker_version: true
kubernetes_version: v1.19.9-rancher1-1

nodes:
  - address: 65.21.148.66
    internal_address: 10.0.0.3
    hostname_override: node1
    user: root
    labels:
      # Label values must be strings: bare `yes` is YAML 1.1 boolean true.
      worker: "yes"
      location: nsk
    role: [controlplane, worker, etcd]
  - address: 65.21.146.162
    internal_address: 10.0.0.2
    hostname_override: node2
    user: root
    labels:
      worker: "yes"
      location: nsk
    role: [controlplane, worker, etcd]
  - address: 65.21.149.204
    internal_address: 10.0.0.4
    hostname_override: node3
    user: root
    labels:
      worker: "yes"
      location: nsk
    role: [controlplane, worker, etcd]

services:
  etcd:
    # Recurring etcd snapshots: every 6h, kept for 30h.
    snapshot: true
    creation: 6h
    retention: 30h
  kube-controller:
    extra_args:
      # Component flags are string-valued; quote numbers to avoid retyping.
      terminated-pod-gc-threshold: "100"
  kubelet:
    extra_args:
      max-pods: "250"
  kube-api:
    extra_args:
      feature-gates: "ServiceAccountIssuerDiscovery=false,RemoveSelfLink=false"

authentication:
  strategy: x509
  # Extra SANs for the API server certificate (external IP + DNS name).
  sans:
    - "167.233.11.162"
    - "rke.k8s-nsk.tk"

dns:
  provider: coredns
  upstreamnameservers:
    - 8.8.8.8
    - 8.8.4.4

ingress:
  provider: nginx
  options:
    # Trust X-Forwarded-* headers so client IPs survive the load balancer.
    use-forwarded-headers: "true"