1
0
Fork 0

refactor: move everything under clusters/bee/

This commit is contained in:
Vojtěch Mareš 2022-12-16 15:59:25 +01:00
parent cdf747fc8d
commit b38e8e62c9
Signed by: vojtech.mares
GPG key ID: C6827B976F17240D
44 changed files with 0 additions and 0 deletions

5
clusters/bee/Makefile Normal file
View file

@ -0,0 +1,5 @@
# Upgrade the in-cluster Argo CD release from the local _argocd umbrella chart:
# register the argo Helm repo, refresh chart dependencies, then run the upgrade.
.PHONY: upgrade-argocd
upgrade-argocd:
	helm repo add argo https://argoproj.github.io/argo-helm
	(cd _argocd && helm dependency update)
	helm upgrade argocd ./_argocd --namespace argocd

View file

@ -0,0 +1,6 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: argo-cd
  repository: https://argoproj.github.io/argo-helm
  version: 5.13.8
digest: sha256:5f2fae75fd5d521cb16ed47d97def8b5da6ef5d13f22e21fb94ae6a5a6648312
generated: "2022-11-17T14:17:48.512932+01:00"

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping the upstream argo-cd chart; overrides live in values.yaml.
apiVersion: v2
name: argocd
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: argo-cd
    version: 5.13.8
    repository: https://argoproj.github.io/argo-helm

View file

@ -0,0 +1,18 @@
# Argo CD "app of apps": syncs the apps/ directory that holds the Application
# manifests for everything else running in this cluster.
# NOTE(review): repoURL points at fox.git while sibling Applications use
# bee.git, and path "apps" carries no clusters/bee/ prefix despite this
# commit's refactor — confirm both are intended.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-of-apps
  namespace: argocd
spec:
  project: default
  destination:
    namespace: argocd
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/fox.git
    path: apps
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true     # delete resources removed from git
      selfHeal: true  # revert out-of-band changes

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys cluster-components/cert-manager from the bee
# GitOps repo into the cert-manager namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cert-manager
  namespace: argocd
spec:
  project: default
  destination:
    namespace: cert-manager
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/cert-manager
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,18 @@
# Argo CD Application: deploys the Hetzner cloud-controller-manager manifests
# from cluster-components/ into kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: hcloud-cloud-controller-manager
  namespace: argocd
spec:
  project: default
  destination:
    namespace: kube-system
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/hcloud-cloud-controller-manager
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,18 @@
# Argo CD Application: deploys the upstream Hetzner CSI driver manifests,
# pinned to tag v2.0.0, into kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: hcloud-csi-driver
  namespace: argocd
spec:
  project: default
  destination:
    namespace: kube-system
    server: https://kubernetes.default.svc
  source:
    repoURL: https://github.com/hetznercloud/csi-driver.git
    path: deploy/kubernetes
    targetRevision: v2.0.0
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,18 @@
# Argo CD Application: deploys cluster-components/hcloud-token (the sealed
# Hetzner API token) into kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: hcloud-token
  namespace: argocd
spec:
  project: default
  destination:
    namespace: kube-system
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/hcloud-token
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys cluster-components/ingress-nginx into its own
# namespace (created on sync).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ingress-nginx
  namespace: argocd
spec:
  project: default
  destination:
    namespace: ingress-nginx
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/ingress-nginx
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,22 @@
# Argo CD Application: deploys cluster-components/logging (Helm chart) into the
# logging namespace. CRDs are skipped here (helm.skipCrds).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: logging
  namespace: argocd
spec:
  project: default
  destination:
    namespace: logging
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/logging
    targetRevision: HEAD
    helm:
      skipCrds: true
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys cluster-components/longhorn into
# longhorn-system (created on sync).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: longhorn
  namespace: argocd
spec:
  project: default
  destination:
    namespace: longhorn-system
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/longhorn
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys cluster-components/monitoring into the
# monitoring namespace; chart CRDs are skipped (the prometheus-operator-crds
# Application installs them separately).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: monitoring
  namespace: argocd
spec:
  project: default
  destination:
    namespace: monitoring
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/monitoring
    targetRevision: HEAD
    helm:
      skipCrds: true
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,21 @@
# Argo CD Application: installs the Prometheus Operator CRDs from the upstream
# repo at v0.60.1. Replace=true avoids the apply-annotation size limit that
# large CRDs hit with a plain apply.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prometheus-operator-crds
  namespace: argocd
spec:
  project: default
  destination:
    namespace: monitoring
    server: https://kubernetes.default.svc
  source:
    repoURL: https://github.com/prometheus-operator/prometheus-operator.git
    path: example/prometheus-operator-crd
    targetRevision: v0.60.1
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys apps/registry into the registry namespace
# (created on sync).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: registry
  namespace: argocd
spec:
  project: default
  destination:
    namespace: registry
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: apps/registry
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,18 @@
# Argo CD Application: deploys cluster-components/sealed-secrets into
# kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: sealed-secrets
  namespace: argocd
spec:
  project: default
  destination:
    namespace: kube-system
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/sealed-secrets
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys Rancher's system-upgrade-controller manifests,
# pinned to v0.10.0, into the system-upgrade namespace (created on sync).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: system-upgrade-controller
  namespace: argocd
spec:
  project: default
  destination:
    namespace: system-upgrade
    server: https://kubernetes.default.svc
  source:
    repoURL: https://github.com/rancher/system-upgrade-controller.git
    path: manifests
    targetRevision: v0.10.0
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,18 @@
# Argo CD Application: deploys cluster-components/upgrade-plans (k3s upgrade
# Plan resources) into the system-upgrade namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: upgrade-plans
  namespace: argocd
spec:
  project: default
  destination:
    namespace: system-upgrade
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: cluster-components/upgrade-plans
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View file

@ -0,0 +1,20 @@
# Argo CD Application: deploys apps/vault into the vault namespace (created on
# sync).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vault
  namespace: argocd
spec:
  project: default
  destination:
    namespace: vault
    server: https://kubernetes.default.svc
  source:
    repoURL: https://gitlab.mareshq.com/gitops/mareshq/bee.git
    path: apps/vault
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,59 @@
# Values for the argo-cd umbrella chart (HA layout + ingress).
# NOTE(review): the commented-out dex config below still carries an OAuth
# clientSecret in plain text in git history — rotate it if it was ever live.
# NOTE(review): verify the applicationSet replica key name (replicas vs
# replicaCount) against the argo-cd 5.13.x chart values.
argo-cd:
  # High Availability
  redis-ha:
    enabled: true
  controller:
    replicas: 1
  repoServer:
    replicas: 2
    pdb:
      enabled: true
      minAvailable: 1
  applicationSet:
    replicas: 2
  server:
    replicas: 2
    pdb:
      enabled: true
      minAvailable: 1
    # configEnabled: false
    # config:
    #   url: "https://argocd.fox.k8s.vxm.cz/"
    #   dex.config: |
    #     connectors:
    #       - type: gitlab
    #         id: gitlab
    #         name: GitLab
    #         config:
    #           baseURL: https://gitlab.mareshq.com
    #           clientID: e699c44be6e14f5d9385b84c022d9a7a08196f374173aaa9ad19b2d20a563050
    #           clientSecret: 953b3dd97b2b1d1a71dbec2c0204774bd0f1b5c94fea988ac23e795577e8c6eb
    #           # redirectURI is filled by ArgoCD
    #           # redirectURI: https://argocd.ant.k8s.vxm.cz/api/dex/callback
    #           groups:
    #             - infrastructure/mareshq
    #           # flag which will switch from using the internal GitLab id to the users handle (@mention) as the user id.
    #           # It is possible for a user to change their own user name but it is very rare for them to do so
    #           useLoginAsID: false
    ingress:
      enabled: true
      hosts:
        - argocd.cloud.mareshq.com
      ingressClassName: nginx
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-mareshq
        kubernetes.io/tls-acme: "true"
        # Argo CD's server terminates TLS itself, so nginx must not verify it.
        nginx.ingress.kubernetes.io/server-snippet: |
          proxy_ssl_verify off;
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
      tls:
        - hosts:
            - argocd.cloud.mareshq.com
          secretName: argocd-tls

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping sikalabs simple-registry; overrides in values.yaml.
apiVersion: v2
# Fix: this chart packages the container registry but was named "vault" —
# an apparent copy/paste slip from apps/vault/Chart.yaml.
name: registry
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: simple-registry
    version: 0.4.0
    repository: https://helm.sikalabs.io

View file

@ -0,0 +1,10 @@
# Values for the sikalabs simple-registry chart.
# NOTE(review): registryPassword is a plaintext credential committed to git —
# consider moving it to a SealedSecret (already used elsewhere in this repo)
# and rotating the current value.
simple-registry:
  host: private.docker.vmpkg.com
  image: registry:2
  storage: 20Gi
  proxyBodySize: 500m
  clusterIssuer: letsencrypt-mareshq
  registryUser: vojtechmares
  registryPassword: 8C8dLnnZVtQcWEDHhtuc
  hostReadOnly: docker.vmpkg.com

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping the HashiCorp vault chart; overrides in values.yaml.
apiVersion: v2
name: vault
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: vault
    version: 0.21.0
    repository: https://helm.releases.hashicorp.com

View file

@ -0,0 +1,39 @@
# Values for the HashiCorp vault chart: CSI provider enabled, injector off,
# 3-node Raft HA, ingress with cert-manager TLS.
vault:
  global:
    # NOTE(review): Vault listens without TLS; TLS appears only at the ingress
    # (vault-tls below) — confirm in-cluster plaintext is acceptable.
    tlsDisable: true
  injector:
    enabled: false
  csi:
    enabled: true
  server:
    ingress:
      enabled: true
      ingressClassName: nginx
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-mareshq
      pathType: Prefix
      tls:
        - secretName: vault-tls
          hosts:
            - vault.cloud.mareshq.com
      hosts:
        - host: vault.cloud.mareshq.com
          paths:
            - /
    dataStorage:
      enabled: true
      size: 1Gi
    auditStorage:
      enabled: true
      size: 1Gi
    ha:
      enabled: true
      replicas: 3
      raft:
        enabled: true

View file

@ -0,0 +1,6 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: cert-manager
  repository: https://charts.jetstack.io
  version: v1.9.1
generated: "2022-08-20T16:10:41.619453+02:00"
digest: sha256:b3c1f49120842cf9da8e5e7c2102b73735c32eefcaacfcffe0bcd0ae85b2c9bc

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping the jetstack cert-manager chart.
apiVersion: v2
name: cert-manager
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: cert-manager
    version: 1.9.1
    repository: https://charts.jetstack.io

View file

@ -0,0 +1,16 @@
# SealedSecret holding the encrypted Cloudflare API key; the in-cluster
# sealed-secrets controller decrypts it into a Secret named
# cloudflare-api-key-secret, referenced by the letsencrypt-mareshq
# ClusterIssuer's dns01 solver.
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: cloudflare-api-key-secret
  namespace: cert-manager
spec:
  encryptedData:
    api-key: AgA4DEU7+IVPXI4m5SmuMJrOMPTELEVfVWqFp4tqURHM7cl8NZrYjtQxlSQbn/dv0w/lPGd8InJePdVg/N1Q49xn5a81joQ8F/xMi5QQF/qe2rY5W7UnRzimabF7x1wkqXb1xVCrMYpF8y+ZmN3o1DheCArmwUWEd8AJr9t7aBw+Z594DZUcV36gMYrUI9vPOoRUeX0dKfvtn1xYH5k5ZqG3lioyMV6dTHX+3oRJrFdmbA17a4llXg0hZnG6syIb0xG7sccQlZr+y+t70JjbHSC3oC8L8p1YGaADueMsG/4mWZTSZj2Iqi1jMd4Qf+Du5qRtOUHMl4DhN5CFcelycHD/Itp6sS+AePZdMXutdNFIcpErv2gYGW16LuqIAV85Os5b2coLJjCf/37f92p354Ezo/sf//8a/edRUU/AFIPeXpilUUXRoO4XrvYQbTe1LHpfKvOowmv88riOWIkzCsT2ufAMFcIgagopyAQBWCDs2roD+IG/9/yQrB9YlT4+fMmcAe518HszT5WuDIeMfpX3tbKlfS7YKcvgeAiylnW77dBw6UAfmIUmu4eHXtKsFt+9DybRZS8+/Ly6Hs3625BCWgVH1t9/MjqxsEAKJIvMmdiBuUmG9cKmEeG0TIQm/PKf3XIixoH7xVHtR3aJd9xoSfmqfFXpVCkBlaTS4V+PMQFiGiLyUngq/qZ7tzMtHT5YediAm+R9K2U9DKISy7X2fW/ZHNe5gB1vIErmSdW3Mv+1CtMZQw==
  template:
    data: null
    metadata:
      creationTimestamp: null
      name: cloudflare-api-key-secret
      namespace: cert-manager

View file

@ -0,0 +1,20 @@
# ClusterIssuer for Let's Encrypt production with two ACME solvers:
# http01 via nginx and dns01 via Cloudflare.
# NOTE(review): neither solver has a selector; cert-manager uses the first
# matching solver, so the dns01 solver may never be chosen — add selectors
# (e.g. dnsZones) if Cloudflare dns01 (wildcards) is intended.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-mareshq
spec:
  acme:
    email: me+infra@vojtechmares.com
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-mareshq
    solvers:
      - http01:
          ingress:
            class: nginx
      - dns01:
          cloudflare:
            email: iam@vojtechmares.com
            apiKeySecretRef:
              name: cloudflare-api-key-secret
              key: api-key

View file

@ -0,0 +1,14 @@
# ClusterIssuer for Let's Encrypt production with a single http01 (nginx)
# solver.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    email: me+infra@vojtechmares.com
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-prod
    solvers:
      - http01:
          ingress:
            class: nginx

View file

@ -0,0 +1,2 @@
# Values for the jetstack cert-manager chart: let the chart manage its CRDs.
cert-manager:
  installCRDs: true

View file

@ -0,0 +1,78 @@
# Hetzner cloud-controller-manager: ServiceAccount, RBAC binding, Deployment.
# NOTE: this release was tested against kubernetes v1.18.x
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
# NOTE(review): binds the controller's ServiceAccount to cluster-admin —
# broader than necessary; a scoped ClusterRole would reduce blast radius.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: hcloud-cloud-controller-manager
  template:
    metadata:
      labels:
        app: hcloud-cloud-controller-manager
    spec:
      serviceAccountName: cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # this taint is set by all kubelets running `--cloud-provider=external`
        # so we should tolerate it to schedule the cloud controller manager
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        # cloud controller manages should be able to run on masters
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
        - key: "node.kubernetes.io/not-ready"
          effect: "NoSchedule"
      containers:
        - image: hetznercloud/hcloud-cloud-controller-manager:v1.13.2
          name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--cloud-provider=hcloud"
            - "--leader-elect=false"
            - "--allow-untagged-cloud"
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # API token comes from the "hcloud" Secret (see the SealedSecret
            # in cluster-components/hcloud-token).
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hcloud
                  key: token
      priorityClassName: system-cluster-critical

View file

@ -0,0 +1,15 @@
# SealedSecret holding the encrypted Hetzner Cloud API token; decrypted
# in-cluster into a Secret named "hcloud" (consumed via HCLOUD_TOKEN by the
# cloud-controller-manager Deployment).
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  creationTimestamp: null
  name: hcloud
  namespace: kube-system
spec:
  encryptedData:
    token: AgAsqlduqKD1e5NYP2y51RwhpEU2axAUt35JTn8RBSxQpAyFpaoyN6d6LS/AL90NOp1bQ4AHhr3Fyl9q2bHOfdYMF8oEaMoDUU726lrdh/FCUvwg2zLjRX/eENHruo7/K8LFoqhEB3LQSRYxNp5J8CFWlxeoHasOz0XnBexTjnRlU+o1UvNABAFA8gkFPYy+ZzC2aIsGGOR5t4jIQ28Eh2lgv8Xl9ulXMfa6pGX/mMY5DY2ow1DYwbsy13STGMHPjUUNGkzEGSq3wuiqwcCySFUcrq4UB3txsTktccct034FaCTnltwO/QwMBaIQDUgnsNNe5ME+5uY7JfW1FBzwGh/FgdcZo2jRnfUrci+Dw78paR0sc5JDlEfpwQSbFTKU4alC9n7sA8ix0ZAb4LHvnPleksO9FZ6GOY4OXnGksRE29Lo+JDYE7nR3ETgh9agjpzNppGJ3j5Rp5ks6VrxUSdoFm51vRDiJIXW/SkieIVtCV+frCnJk7lbkOvRvsiCuy1e41AkzuThRI4AA+bJOn1ELK8sc4ppytkkDwuYVSQBLnV3wjz/oWscOZnFjQG06DEr2whsYpgLtT+YtD//PdTGE4JZtoDi4UJOFr2qvVvafuObQ2c1G6DFHonjzjmdbcCXJuekR58H6Oag2555zuvIk2alncHuWAxg2EgbiqCfIOuEfqgdVWk0Q9MqZxyIIjwpLSV38J06zZyxfWo1aqUxEkK9vPixugTbfP57ACde3txGQRunlDeF6MuS8rCbtAxXAUojxw3RL1JIfmrNCtdZ0
  template:
    metadata:
      creationTimestamp: null
      name: hcloud
      namespace: kube-system

View file

@ -0,0 +1,6 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: ingress-nginx
  repository: https://kubernetes.github.io/ingress-nginx
  version: 4.2.1
digest: sha256:634bcad7bd074bb515df6896f885ae25690479b90789dee1925d77d509052e63
generated: "2022-08-20T15:08:03.043219+02:00"

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping the upstream ingress-nginx chart.
apiVersion: v2
name: ingress-nginx
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: ingress-nginx
    version: 4.2.1
    repository: https://kubernetes.github.io/ingress-nginx

View file

@ -0,0 +1,22 @@
# Values for ingress-nginx: DaemonSet on host ports (no LoadBalancer —
# service stays ClusterIP), default IngressClass, metrics disabled.
ingress-nginx:
  controller:
    kind: DaemonSet
    ingressClassResource:
      default: true
    hostPort:
      enabled: true
    service:
      type: ClusterIP
    metrics:
      enabled: false
      serviceMonitor:
        enabled: false
    updateStrategy:
      type: RollingUpdate
      rollingUpdate:
        maxUnavailable: 1

View file

@ -0,0 +1,9 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: promtail
  repository: https://grafana.github.io/helm-charts
  version: 6.3.0
- name: loki
  repository: https://grafana.github.io/helm-charts
  version: 3.0.1
digest: sha256:e4b0771d0c738fe26c8770cdd6fbc564c31b69b052516de22e4604ee6f2bcb3b
generated: "2022-09-08T16:03:37.238201+02:00"

View file

@ -0,0 +1,14 @@
# Umbrella chart bundling promtail (log shipper) and loki (log store).
apiVersion: v2
name: logging
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: promtail
    version: 6.3.0
    repository: https://grafana.github.io/helm-charts
  - name: loki
    version: 3.0.1
    repository: https://grafana.github.io/helm-charts

View file

@ -0,0 +1,27 @@
# Values for the logging stack: promtail + loki on hcloud-volumes storage.
promtail:
  fullnameOverride: promtail
loki:
  fullnameOverride: loki
  write:
    persistence:
      size: 20Gi
      storageClass: hcloud-volumes
  read:
    persistence:
      size: 20Gi
      storageClass: hcloud-volumes
  singleBinary:
    persistence:
      storageClass: hcloud-volumes
  monitoring:
    # Do not collect the logs of Loki inside Loki
    selfMonitoring:
      enabled: false
      grafanaAgent:
        installOperator: false

View file

@ -0,0 +1,12 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: kube-prometheus-stack
  repository: https://prometheus-community.github.io/helm-charts
  version: 42.0.0
- name: node-problem-detector
  repository: https://charts.deliveryhero.io/
  version: 2.3.1
- name: prometheus-blackbox-exporter
  repository: https://prometheus-community.github.io/helm-charts
  version: 7.1.3
digest: sha256:542706746c80627130002358060f2b9b327578d6f588912069621eba92b88d27
generated: "2022-11-23T13:32:02.288338+01:00"

View file

@ -0,0 +1,17 @@
# Umbrella chart bundling kube-prometheus-stack, node-problem-detector and
# blackbox-exporter.
# NOTE(review): chart is named "prometheus-stack" while its directory and the
# Argo CD Application are "monitoring" — confirm the name is intentional.
apiVersion: v2
name: prometheus-stack
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: kube-prometheus-stack
    version: 42.0.0
    repository: https://prometheus-community.github.io/helm-charts
  - name: node-problem-detector
    version: 2.3.1
    repository: https://charts.deliveryhero.io/
  - name: prometheus-blackbox-exporter
    version: 7.1.3
    repository: https://prometheus-community.github.io/helm-charts

View file

@ -0,0 +1,153 @@
# Values for the monitoring stack: kube-prometheus-stack (Prometheus, Grafana,
# Alertmanager), node-problem-detector and blackbox-exporter.
kube-prometheus-stack:
  cleanPrometheusOperatorObjectNames: true
  fullnameOverride: "monitoring"
  prometheus:
    prometheusSpec:
      # Pick up ServiceMonitors/PodMonitors/Rules from all releases, not just
      # this Helm release.
      serviceMonitorSelectorNilUsesHelmValues: false
      podMonitorSelectorNilUsesHelmValues: false
      ruleSelectorNilUsesHelmValues: false
      storageSpec:
        volumeClaimTemplate:
          spec:
            storageClassName: hcloud-volumes
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 20Gi
    ingress:
      enabled: true
      pathType: Prefix
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-mareshq
        # nginx.ingress.kubernetes.io/auth-url: "https://auth.cloud.mareshq.com/oauth2/auth"
        # nginx.ingress.kubernetes.io/auth-signin: "https://auth.cloud.mareshq.com/oauth2/start?rd=$scheme://$host$request_uri"
      hosts:
        - prometheus.cloud.mareshq.com
      paths:
        - /
      tls:
        - hosts:
            - prometheus.cloud.mareshq.com
          secretName: prometheus-tls
    serviceMonitor:
      selfMonitor: true
  grafana:
    fullnameOverride: grafana
    defaultDashboardsEnabled: true
    # NOTE(review): plaintext Grafana admin password committed to git —
    # move it to a (sealed) secret and rotate the current value.
    adminPassword: "zud!edy7WER5uqg7gjq"
    ingress:
      enabled: true
      pathType: Prefix
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-mareshq
      hosts:
        - grafana.cloud.mareshq.com
      paths:
        - /
      tls:
        - hosts:
            - grafana.cloud.mareshq.com
          secretName: grafana-tls
    serviceMonitor:
      selfMonitor: true
    # grafana.ini:
    #   server:
    #     root_url: https://grafana.cloud.mareshq.com
    #   auth.gitlab:
    #     enabled: true
    #     allow_sign_up: true
    #     client_id: bbe5e1c7a6bf81a6725b1fe7f5c64c96e9bc697bebd24d5e41b2cd623d38917b
    #     client_secret: c137d2af70f82994be0c2be6437cef97cdaa7801e6cb232510fd7f10209d0cdf
    #     scopes: read_api
    #     auth_url: https://gitlab.mareshq.com/oauth/authorize
    #     token_url: https://gitlab.mareshq.com/oauth/token
    #     api_url: https://gitlab.mareshq.com/api/v4
    #     allowed_groups: mareshq
    #   auth.basic:
    #     enabled: false
    #   auth:
    #     disable_login_form: false
    persistence:
      enabled: false
      storageClassName: hcloud-volumes
  alertmanager:
    ingress:
      enabled: true
      pathType: Prefix
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-mareshq
        # nginx.ingress.kubernetes.io/auth-url: "https://auth.cloud.mareshq.com/oauth2/auth"
        # nginx.ingress.kubernetes.io/auth-signin: "https://auth.cloud.mareshq.com/oauth2/start?rd=$scheme://$host$request_uri"
      hosts:
        - alertmanager.cloud.mareshq.com
      paths:
        - /
      tls:
        - hosts:
            - alertmanager.cloud.mareshq.com
          secretName: alertmanager-tls
    serviceMonitor:
      selfMonitor: true
  kubeApiServer:
    enabled: true
  kubelet:
    enabled: true
  kubeControllerManager:
    enabled: false
  coreDns:
    enabled: true
  kubeScheduler:
    enabled: true
  kubeProxy:
    enabled: false
  kubeStateMetrics:
    enabled: true
  nodeExporter:
    enabled: true
  prometheusOperator:
    serviceMonitor:
      selfMonitor: true
  defaultRules:
    create: false
  prometheus-node-exporter:
    fullnameOverride: node-exporter
  kube-state-metrics:
    fullnameOverride: kube-state-metrics
node-problem-detector:
  fullnameOverride: node-problem-detector
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
    prometheusRule:
      enabled: true
prometheus-blackbox-exporter:
  fullnameOverride: blackbox-exporter

View file

@ -0,0 +1,6 @@
# Chart.lock — generated by `helm dependency update`; do not edit by hand.
dependencies:
- name: sealed-secrets
  repository: https://bitnami-labs.github.io/sealed-secrets
  version: 2.6.0
digest: sha256:8bfe338b7cd82e42a84b7ea83543043b9102cf9944adff28615af77ed879b1a6
generated: "2022-08-14T12:25:09.068985+02:00"

View file

@ -0,0 +1,11 @@
# Umbrella chart wrapping the bitnami-labs sealed-secrets controller chart.
apiVersion: v2
name: sealed-secrets
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "1.0"
dependencies:
  - name: sealed-secrets
    version: 2.6.0
    repository: https://bitnami-labs.github.io/sealed-secrets

View file

@ -0,0 +1,2 @@
# Values for sealed-secrets: keep the controller name kubeseal expects by
# default.
sealed-secrets:
  fullnameOverride: sealed-secrets-controller

View file

@ -0,0 +1,47 @@
# system-upgrade-controller Plans tracking the k3s v1.25 release channel:
# servers (control-plane) first, then agents, one node at a time.
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-server-v1.25
  namespace: system-upgrade
  labels:
    k3s-upgrade: server
spec:
  concurrency: 1 # Batch size (roughly maps to maximum number of unschedulable nodes)
  channel: https://update.k3s.io/v1-release/channels/v1.25
  nodeSelector:
    matchExpressions:
      - {key: k3s-upgrade, operator: Exists}
      - {key: k3s-upgrade, operator: NotIn, values: ["disabled", "false"]}
      - {key: k3os.io/mode, operator: DoesNotExist}
      - {key: node-role.kubernetes.io/control-plane, operator: Exists}
  serviceAccountName: system-upgrade
  cordon: true
  upgrade:
    image: rancher/k3s-upgrade
---
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: k3s-agent-v1.25
  namespace: system-upgrade
  labels:
    k3s-upgrade: agent
spec:
  concurrency: 1 # Batch size (roughly maps to maximum number of unschedulable nodes)
  channel: https://update.k3s.io/v1-release/channels/v1.25
  nodeSelector:
    matchExpressions:
      - {key: k3s-upgrade, operator: Exists}
      - {key: k3s-upgrade, operator: NotIn, values: ["disabled", "false"]}
      - {key: k3os.io/mode, operator: DoesNotExist}
      - {key: node-role.kubernetes.io/control-plane, operator: DoesNotExist}
  serviceAccountName: system-upgrade
  prepare:
    # Defaults to the same "resolved" tag that is used for the `upgrade` container, NOT `latest`
    image: rancher/k3s-upgrade
    args: ["prepare", "k3s-server-v1.25"]
  drain:
    force: true
    skipWaitForDeleteTimeout: 60 # 1.18+ (honor pod disruption budgets up to 60 seconds per pod then moves on)
  upgrade:
    image: rancher/k3s-upgrade