Compare commits

..

512 Commits

Author SHA1 Message Date
renovate-bot 28e5bf90b5 chore(deps): update all-minor-patch-updates 2026-04-22 15:00:49 +00:00
williamp 7f65b1baef Merge pull request 'chore(deps): update all-minor-patch-updates' (#296) from renovate/all-minor-patch-updates into main
Reviewed-on: #296
2026-04-17 18:42:13 +00:00
renovate-bot 4fab22f7f1 chore(deps): update all-minor-patch-updates 2026-04-17 15:00:31 +00:00
williamp 5796307f10 argocd: add more 'nullBytePolicy: Ignore' to rest of external secrets 2026-04-15 10:53:59 -04:00
williamp e3858b302e argocd: add 'nullBytePolicy: Ignore' to externalsecret to avoid sync error 2026-04-15 10:52:17 -04:00
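The two commits above add `nullBytePolicy: Ignore` to ExternalSecret manifests so Argo CD stops flagging them as perpetually out of sync. A minimal sketch of such a manifest is below; only the `nullBytePolicy: Ignore` key/value comes from the commits, while its exact placement in the ExternalSecret spec, and all names, stores, and paths, are assumptions.

```yaml
# Hypothetical ExternalSecret illustrating the change described above.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: example-credentials        # assumed name
  namespace: argocd
spec:
  nullBytePolicy: Ignore           # key from the commits; placement is an assumption
  refreshInterval: 1h
  secretStoreRef:
    name: vault-backend            # assumed SecretStore name
    kind: ClusterSecretStore
  target:
    name: example-credentials
  data:
    - secretKey: password
      remoteRef:
        key: secret/example        # assumed Vault path
        property: password
```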
williamp b3a093d67d Merge pull request 'chore(deps): update hashicorp/vault docker tag to v2' (#295) from renovate/hashicorp-vault-2.x into main
Reviewed-on: #295
2026-04-15 14:40:42 +00:00
williamp c82138d4fe Merge pull request 'chore(deps): update all-minor-patch-updates' (#294) from renovate/all-minor-patch-updates into main
Reviewed-on: #294
2026-04-15 14:40:29 +00:00
renovate-bot 0d8af53572 chore(deps): update all-minor-patch-updates 2026-04-14 22:00:35 +00:00
renovate-bot 79751e2e7f chore(deps): update hashicorp/vault docker tag to v2 2026-04-14 21:01:03 +00:00
williamp 28ceaa11be monitoring: add nullbytePolicy to discord-webhook to fix argo sync issues 2026-04-10 12:22:25 -04:00
williamp 18eb6ac82f Merge pull request 'chore(deps): update all-minor-patch-updates' (#293) from renovate/all-minor-patch-updates into main
Reviewed-on: #293
2026-04-10 16:18:38 +00:00
renovate-bot 01fbbcef02 chore(deps): update all-minor-patch-updates 2026-04-10 15:00:31 +00:00
williamp dbf5e02807 Merge pull request 'chore(deps): update all-minor-patch-updates' (#292) from renovate/all-minor-patch-updates into main
Reviewed-on: #292
2026-04-09 01:43:46 +00:00
renovate-bot 379b2d9a2a chore(deps): update all-minor-patch-updates 2026-04-08 22:00:26 +00:00
williamp 9248d4c35a Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v83.2.0' (#291) from renovate/all-minor-patch-updates into main
Reviewed-on: #291
2026-04-08 18:18:43 +00:00
renovate-bot 2f5c7b4c07 chore(deps): update helm release kube-prometheus-stack to v83.2.0 2026-04-08 18:00:35 +00:00
williamp 74b24988be Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v83.1.0' (#290) from renovate/all-minor-patch-updates into main
Reviewed-on: #290
2026-04-08 12:57:08 +00:00
renovate-bot 2dd4502409 chore(deps): update helm release kube-prometheus-stack to v83.1.0 2026-04-08 10:01:18 +00:00
williamp 774cab3173 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v83.0.2' (#289) from renovate/all-minor-patch-updates into main
Reviewed-on: #289
2026-04-08 02:04:32 +00:00
renovate-bot abeaf0c881 chore(deps): update helm release kube-prometheus-stack to v83.0.2 2026-04-07 20:00:23 +00:00
williamp 7e57724ef8 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v83' (#288) from renovate/kube-prometheus-stack-83.x into main
Reviewed-on: #288
2026-04-07 15:22:36 +00:00
renovate-bot 1040199d45 chore(deps): update helm release kube-prometheus-stack to v83 2026-04-06 21:00:51 +00:00
williamp caae6992ee Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.17.1' (#286) from renovate/all-minor-patch-updates into main
Reviewed-on: #286
2026-04-04 17:03:23 +00:00
renovate-bot 5fcedff675 chore(deps): update helm release kube-prometheus-stack to v82.17.1 2026-04-04 12:00:28 +00:00
williamp 88f65aeb67 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.17.0' (#285) from renovate/all-minor-patch-updates into main
Reviewed-on: #285
2026-04-04 00:39:17 +00:00
renovate-bot 49f29523c4 chore(deps): update helm release kube-prometheus-stack to v82.17.0 2026-04-03 18:00:53 +00:00
williamp c75c56d28e Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.16.2' (#284) from renovate/all-minor-patch-updates into main
Reviewed-on: #284
2026-04-02 22:09:08 +00:00
renovate-bot 29fa53acc1 chore(deps): update helm release kube-prometheus-stack to v82.16.2 2026-04-02 22:00:30 +00:00
williamp 3e48ae6c4d Merge pull request 'chore(deps): update all-minor-patch-updates' (#283) from renovate/all-minor-patch-updates into main
Reviewed-on: #283
2026-04-01 22:13:38 +00:00
renovate-bot 2b4dbac471 chore(deps): update all-minor-patch-updates 2026-03-31 16:00:33 +00:00
williamp ac95705df1 Merge pull request 'chore(deps): update helm release kite to v0.9.0' (#282) from renovate/all-minor-patch-updates into main
Reviewed-on: #282
2026-03-28 18:27:57 +00:00
renovate-bot 0ae3340140 chore(deps): update helm release kite to v0.9.0 2026-03-28 18:00:21 +00:00
williamp 2427a823f4 Merge pull request 'chore(deps): update all-minor-patch-updates' (#281) from renovate/all-minor-patch-updates into main
Reviewed-on: #281
2026-03-28 00:57:52 +00:00
renovate-bot 127fdd54cf chore(deps): update all-minor-patch-updates 2026-03-27 20:00:42 +00:00
williamp 8df1305679 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.15.0' (#280) from renovate/all-minor-patch-updates into main
Reviewed-on: #280
2026-03-27 02:53:32 +00:00
renovate-bot 76f9c2e25c chore(deps): update helm release kube-prometheus-stack to v82.15.0 2026-03-26 23:00:24 +00:00
williamp 678e9381bd Merge pull request 'chore(deps): update all-minor-patch-updates' (#279) from renovate/all-minor-patch-updates into main
Reviewed-on: #279
2026-03-26 14:15:28 +00:00
renovate-bot 3966901b75 chore(deps): update all-minor-patch-updates 2026-03-25 21:00:25 +00:00
williamp 8543e4b41f Merge pull request 'chore(deps): update all-minor-patch-updates' (#278) from renovate/all-minor-patch-updates into main
Reviewed-on: #278
2026-03-25 01:48:29 +00:00
renovate-bot 7a85ea4c23 chore(deps): update all-minor-patch-updates 2026-03-24 22:00:28 +00:00
williamp 4eda2129f1 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.13.0' (#277) from renovate/all-minor-patch-updates into main
Reviewed-on: #277
2026-03-20 21:58:08 +00:00
renovate-bot cdc4c39728 chore(deps): update helm release kube-prometheus-stack to v82.13.0 2026-03-20 18:00:33 +00:00
williamp e46107ad02 Merge pull request 'chore(deps): update helm release external-secrets to v2.2.0' (#276) from renovate/all-minor-patch-updates into main
Reviewed-on: #276
2026-03-20 17:02:09 +00:00
renovate-bot 0121927f26 chore(deps): update helm release external-secrets to v2.2.0 2026-03-20 17:00:32 +00:00
williamp eef0b13ab0 Merge pull request 'chore(deps): update helm release traefik to v39.0.6' (#275) from renovate/all-minor-patch-updates into main
Reviewed-on: #275
2026-03-20 16:32:39 +00:00
renovate-bot 8c30536293 chore(deps): update helm release traefik to v39.0.6 2026-03-20 11:00:39 +00:00
williamp 841bd52da9 Merge pull request 'chore(deps): update all-minor-patch-updates' (#274) from renovate/all-minor-patch-updates into main
Reviewed-on: #274
2026-03-19 22:05:54 +00:00
renovate-bot 51e03a6e08 chore(deps): update all-minor-patch-updates 2026-03-19 16:00:34 +00:00
williamp 6a0074676d Merge pull request 'chore(deps): update helm release argo-cd to v9.4.12' (#273) from renovate/all-minor-patch-updates into main
Reviewed-on: #273
2026-03-17 20:48:07 +00:00
renovate-bot dbf1380515 chore(deps): update helm release argo-cd to v9.4.12 2026-03-17 11:01:17 +00:00
williamp 2504e99cc8 Merge pull request 'chore(deps): update all-minor-patch-updates' (#272) from renovate/all-minor-patch-updates into main
Reviewed-on: #272
2026-03-17 03:36:05 +00:00
williamp c50417c3da Merge pull request 'chore(deps): update helm release velero to v12' (#271) from renovate/velero-12.x into main
Reviewed-on: #271
2026-03-17 03:35:43 +00:00
renovate-bot 50b4f42a6e chore(deps): update all-minor-patch-updates 2026-03-16 22:00:26 +00:00
renovate-bot cd32ebd7c9 chore(deps): update helm release velero to v12 2026-03-16 05:00:27 +00:00
williamp 3e655ae613 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.10.4' (#270) from renovate/all-minor-patch-updates into main
Reviewed-on: #270
2026-03-15 15:48:47 +00:00
renovate-bot 5bd1cdc714 chore(deps): update helm release kube-prometheus-stack to v82.10.4 2026-03-15 10:00:27 +00:00
williamp 06e85aac5c Merge pull request 'chore(deps): update helm release kite to v0.8.1' (#269) from renovate/all-minor-patch-updates into main
Reviewed-on: #269
2026-03-11 22:30:41 +00:00
renovate-bot dca946aadd chore(deps): update helm release kite to v0.8.1 2026-03-11 17:00:30 +00:00
williamp 0627c8d8d5 Merge pull request 'chore(deps): update all-minor-patch-updates' (#268) from renovate/all-minor-patch-updates into main
Reviewed-on: #268
2026-03-10 19:28:33 +00:00
renovate-bot 010eeaf903 chore(deps): update all-minor-patch-updates 2026-03-10 16:00:28 +00:00
williamp 623d9a088e Merge pull request 'chore(deps): update all-minor-patch-updates' (#267) from renovate/all-minor-patch-updates into main
Reviewed-on: #267
2026-03-09 23:58:57 +00:00
renovate-bot 4da51ca17a chore(deps): update all-minor-patch-updates 2026-03-09 22:00:26 +00:00
williamp f8b3b81e83 Merge pull request 'chore(deps): update all-minor-patch-updates' (#266) from renovate/all-minor-patch-updates into main
Reviewed-on: #266
2026-03-09 12:13:29 +00:00
renovate-bot 30a8e59e35 chore(deps): update all-minor-patch-updates 2026-03-09 10:00:51 +00:00
williamp 5172c5c265 Merge pull request 'chore(deps): update helm release external-secrets to v2.1.0' (#265) from renovate/all-minor-patch-updates into main
Reviewed-on: #265
2026-03-08 17:10:37 +00:00
renovate-bot 0f638ce01a chore(deps): update helm release external-secrets to v2.1.0 2026-03-07 18:00:29 +00:00
williamp 2f495c5cc5 Merge pull request 'chore(deps): update all-minor-patch-updates' (#264) from renovate/all-minor-patch-updates into main
Reviewed-on: #264
2026-03-07 01:48:14 +00:00
renovate-bot 29dab2f81e chore(deps): update all-minor-patch-updates 2026-03-06 22:00:31 +00:00
williamp c03fb7ffdc Merge pull request 'chore(deps): update all-minor-patch-updates' (#263) from renovate/all-minor-patch-updates into main
Reviewed-on: #263
2026-03-05 14:12:59 +00:00
renovate-bot 9fb678612a chore(deps): update all-minor-patch-updates 2026-03-05 12:00:46 +00:00
williamp 5ee7466152 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.8.0' (#262) from renovate/all-minor-patch-updates into main
Reviewed-on: #262
2026-03-03 23:08:50 +00:00
renovate-bot 452674709a chore(deps): update helm release kube-prometheus-stack to v82.8.0 2026-03-03 23:00:43 +00:00
williamp 99ec607e6d renovate: replace automerge with consolidation of patch and minor updates into single PR per dependency 2026-03-03 11:08:56 -05:00
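This commit groups all minor and patch bumps under a single Renovate branch/PR; the recurring `renovate/all-minor-patch-updates` branches above reflect that group name. A minimal sketch of the relevant `renovate.json` fragment, with the group name taken from the branch names and everything else assumed:

```json
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "packageRules": [
    {
      "description": "Consolidate patch and minor updates into one grouped PR",
      "matchUpdateTypes": ["minor", "patch"],
      "groupName": "all-minor-patch-updates",
      "automerge": false
    }
  ]
}
```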
williamp 96424b124c Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.5.0' (#261) from renovate/kube-prometheus-stack-82.x into main
Reviewed-on: #261
2026-03-03 16:05:34 +00:00
renovate-bot 96937cd358 chore(deps): update helm release kube-prometheus-stack to v82.5.0 2026-03-03 15:00:31 +00:00
renovate-bot 373823e565 chore(deps): update helm release argo-cd to v9.4.7 2026-03-03 12:00:55 +00:00
renovate-bot d36dd7735f chore(deps): update helm release argo-cd to v9.4.6 2026-03-01 21:00:31 +00:00
renovate-bot 1a0aeb0e64 chore(deps): update helm release kube-prometheus-stack to v82.4.3 2026-02-27 19:00:32 +00:00
renovate-bot e6e63b5b2c chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.15 2026-02-27 11:00:50 +00:00
renovate-bot 0fcb071122 chore(deps): update helm release kube-prometheus-stack to v82.4.2 2026-02-27 10:00:35 +00:00
renovate-bot e0f4fc71af chore(deps): update helm release kube-prometheus-stack to v82.4.1 2026-02-26 20:00:31 +00:00
renovate-bot e747bbe519 chore(deps): update helm release argo-cd to v9.4.5 2026-02-26 09:00:25 +00:00
williamp 067c3cbc59 Merge pull request 'chore(deps): update helm release velero to v11.4.0' (#252) from renovate/velero-11.x into main
Reviewed-on: #252
2026-02-26 02:07:55 +00:00
williamp 27fcdd6bac Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.4.0' (#253) from renovate/kube-prometheus-stack-82.x into main
Reviewed-on: #253
2026-02-26 02:07:34 +00:00
renovate-bot 67a7c32675 chore(deps): update helm release kube-prometheus-stack to v82.4.0 2026-02-25 08:00:57 +00:00
renovate-bot c352c07f7b chore(deps): update helm release velero to v11.4.0 2026-02-25 05:01:07 +00:00
renovate-bot 3397d80865 chore(deps): update helm release rook-ceph to v1.19.2 2026-02-24 20:00:39 +00:00
renovate-bot 39548b9b31 chore(deps): update helm release cert-manager to v1.19.4 2026-02-24 16:01:20 +00:00
renovate-bot 9b75b8d4bf chore(deps): update helm release argo-cd to v9.4.4 2026-02-22 20:00:35 +00:00
renovate-bot 8d29dd8bd1 chore(deps): update helm release kube-prometheus-stack to v82.2.1 2026-02-22 18:00:23 +00:00
williamp 4090830d95 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.2.0' (#245) from renovate/kube-prometheus-stack-82.x into main
Reviewed-on: #245
2026-02-20 16:40:41 +00:00
renovate-bot 21790a5a41 chore(deps): update helm release kube-prometheus-stack to v82.2.0 2026-02-20 15:00:59 +00:00
renovate-bot 4ab5ecdd6f chore(deps): update helm release traefik to v39.0.2 2026-02-20 15:00:48 +00:00
renovate-bot c11f7897d7 chore(deps): update helm release external-secrets to v2.0.1 2026-02-20 14:00:24 +00:00
renovate-bot 8839dd6eb1 chore(deps): update helm release kube-prometheus-stack to v82.1.1 2026-02-19 17:00:39 +00:00
renovate-bot cc57178974 chore(deps): update helm release argo-cd to v9.4.3 2026-02-18 17:00:59 +00:00
williamp 3d95158244 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82.1.0' (#240) from renovate/kube-prometheus-stack-82.x into main
Reviewed-on: #240
2026-02-17 19:36:08 +00:00
renovate-bot 141f05c6ae chore(deps): update helm release kube-prometheus-stack to v82.1.0 2026-02-17 15:00:31 +00:00
renovate-bot 3651f23c72 chore(deps): update helm release kube-prometheus-stack to v82.0.2 2026-02-17 01:00:25 +00:00
renovate-bot b4cbbd97a6 chore(deps): update helm release kube-prometheus-stack to v82.0.1 2026-02-16 19:00:19 +00:00
williamp ede26d9c1d Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v82' (#237) from renovate/kube-prometheus-stack-82.x into main
Reviewed-on: #237
2026-02-16 14:14:15 +00:00
renovate-bot cc14ef66ed chore(deps): update helm release kube-prometheus-stack to v82 2026-02-15 08:00:27 +00:00
renovate-bot 35b3f6cc42 chore(deps): update helm release kube-prometheus-stack to v81.6.9 2026-02-13 23:00:30 +00:00
renovate-bot 033a3b95ad chore(deps): update helm release kube-prometheus-stack to v81.6.8 2026-02-13 22:00:24 +00:00
renovate-bot f90060e366 chore(deps): update helm release kube-prometheus-stack to v81.6.7 2026-02-13 15:00:25 +00:00
renovate-bot 1f074a7087 chore(deps): update helm release kube-prometheus-stack to v81.6.6 2026-02-13 13:01:24 +00:00
renovate-bot c6cf3b7d84 chore(deps): update helm release argo-cd to v9.4.2 2026-02-13 13:00:24 +00:00
renovate-bot e611c68342 chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.14 2026-02-13 12:00:30 +00:00
renovate-bot d828d88078 chore(deps): update helm release traefik to v39.0.1 2026-02-13 10:00:29 +00:00
renovate-bot 8fa00efc16 chore(deps): update helm release kube-prometheus-stack to v81.6.5 2026-02-13 06:00:24 +00:00
renovate-bot fbe2274182 chore(deps): update helm release kube-prometheus-stack to v81.6.4 2026-02-13 02:00:37 +00:00
renovate-bot bf4985040a chore(deps): update helm release kubernetes-replicator to v2.12.3 2026-02-12 11:00:46 +00:00
renovate-bot ce3a367ec9 chore(deps): update helm release kube-prometheus-stack to v81.6.3 2026-02-12 09:00:20 +00:00
renovate-bot 6006e75db9 chore(deps): update helm release kube-prometheus-stack to v81.6.2 2026-02-11 19:00:31 +00:00
williamp db590d1d2c Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81.6.1' (#224) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #224
2026-02-10 18:52:20 +00:00
renovate-bot 7b6f92646f chore(deps): update helm release kube-prometheus-stack to v81.6.1 2026-02-10 14:00:27 +00:00
renovate-bot 6bbd9748a2 chore(deps): update helm release kube-prometheus-stack to v81.5.2 2026-02-09 19:00:20 +00:00
renovate-bot b22ff17c1d chore(deps): update helm release kube-prometheus-stack to v81.5.1 2026-02-09 13:00:43 +00:00
williamp 96900bea0c add README 2026-02-07 13:47:45 -05:00
williamp 0f84c335de omni: use ipvs for kube-proxy 2026-02-06 21:31:00 -05:00
williamp 5e1b5dc007 omni: try to use iptables mode instead of nftables for kube-proxy in attempt to fix TLS timeout issues 2026-02-06 14:48:53 -05:00
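These two commits flip the kube-proxy mode in the Omni/Talos cluster configuration, first from nftables to iptables and then to IPVS, while chasing TLS timeout issues. A rough sketch of the corresponding Talos machine-config patch; the surrounding Omni cluster-template wiring is assumed.

```yaml
# Talos config patch (sketch) selecting the kube-proxy mode.
cluster:
  proxy:
    mode: ipvs   # the earlier attempt used "iptables" before settling on IPVS
```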
williamp f0f1b45c93 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2026.2.0' (#220) from renovate/cloudflare-cloudflared-2026.x into main
Reviewed-on: #220
2026-02-06 19:42:42 +00:00
williamp 46c4e7b50f Merge pull request 'chore(deps): update helm release external-secrets to v2' (#221) from renovate/external-secrets-2.x into main
Reviewed-on: #221
2026-02-06 19:42:18 +00:00
renovate-bot dbba05d7b6 chore(deps): update helm release external-secrets to v2 2026-02-06 16:00:30 +00:00
renovate-bot db9aa7c99d chore(deps): update cloudflare/cloudflared docker tag to v2026.2.0 2026-02-06 16:00:22 +00:00
williamp 8f0d73946f traefik: update traefik-local service to have a 3600 second timeout 2026-02-05 19:43:20 -05:00
renovate-bot d0ac6145e0 chore(deps): update helm release rook-ceph to v1.19.1 2026-02-05 22:00:37 +00:00
renovate-bot b9830a2153 chore(deps): update helm release argo-cd to v9.4.1 2026-02-05 20:00:24 +00:00
renovate-bot 4f51cc5799 chore(deps): update helm release external-secrets to v1.3.2 2026-02-03 21:00:24 +00:00
williamp fe1707d078 argocd: add default values to externalsecrets 2026-02-03 13:30:51 -05:00
williamp df154d3b8b Merge pull request 'chore(deps): update helm release argo-cd to v9.4.0' (#214) from renovate/argo-cd-9.x into main
Reviewed-on: #214
2026-02-03 18:21:55 +00:00
williamp 49d6684d0a Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81.5.0' (#216) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #216
2026-02-03 18:21:37 +00:00
renovate-bot 179cdaffd7 chore(deps): update helm release kube-prometheus-stack to v81.5.0 2026-02-03 17:00:42 +00:00
renovate-bot 396c998336 chore(deps): update helm release kube-prometheus-stack to v81.4.3 2026-02-03 08:00:23 +00:00
williamp 1829d76a07 omni: rewrite git.dubyatp.xyz to traefik local 2026-02-02 22:22:19 -05:00
williamp 4315074427 traefik: add traefik-local svc 2026-02-02 21:21:53 -05:00
renovate-bot 289a51fd7d chore(deps): update helm release argo-cd to v9.4.0 2026-02-03 00:00:39 +00:00
renovate-bot b6f178ef88 chore(deps): update helm release cert-manager to v1.19.3 2026-02-02 15:00:30 +00:00
williamp eb021c1510 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81.4.2' (#212) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #212
2026-01-31 02:36:02 +00:00
renovate-bot 99e7e0ae30 chore(deps): update helm release kube-prometheus-stack to v81.4.2 2026-01-30 22:00:50 +00:00
renovate-bot e80fb62fd7 chore(deps): update helm release kube-prometheus-stack to v81.3.2 2026-01-30 09:00:57 +00:00
renovate-bot b6cf261505 chore(deps): update helm release kube-prometheus-stack to v81.3.1 2026-01-29 18:01:21 +00:00
williamp cc2b1825d5 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81.3.0' (#209) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #209
2026-01-29 00:01:54 +00:00
renovate-bot 5b15d78da0 chore(deps): update helm release kube-prometheus-stack to v81.3.0 2026-01-28 20:01:08 +00:00
renovate-bot 3e54d7c96d chore(deps): update helm release kube-prometheus-stack to v81.2.5 2026-01-28 18:00:32 +00:00
renovate-bot 52d680a143 chore(deps): update cloudflare/cloudflared docker tag to v2026.1.2 2026-01-27 12:00:39 +00:00
renovate-bot a5a604a496 chore(deps): update helm release argo-cd to v9.3.7 2026-01-27 08:00:41 +00:00
renovate-bot 9194de2325 chore(deps): update helm release argo-cd to v9.3.6 2026-01-26 12:00:29 +00:00
williamp ce5a5c63e3 Merge pull request 'chore(deps): update helm release external-secrets to v1.3.1' (#203) from renovate/external-secrets-1.x into main
Reviewed-on: #203
2026-01-23 23:45:20 +00:00
renovate-bot 6a4c3e2253 chore(deps): update helm release kube-prometheus-stack to v81.2.2 2026-01-23 21:00:22 +00:00
renovate-bot 65013f6720 chore(deps): update helm release external-secrets to v1.3.1 2026-01-23 19:00:19 +00:00
williamp 9426dbeb71 traefik: remove unnecessary line 2026-01-23 10:11:23 -05:00
williamp ca3234cb79 traefik: fix syntax for new version 2026-01-23 10:09:43 -05:00
williamp 6d2d895b67 Merge pull request 'chore(deps): update helm release traefik to v39' (#201) from renovate/traefik-39.x into main
Reviewed-on: #201
2026-01-23 15:01:10 +00:00
williamp c2bd9b23ac Merge pull request 'chore(deps): update helm release kite to v0.7.8' (#202) from renovate/kite-0.x into main
Reviewed-on: #202
2026-01-23 14:54:06 +00:00
williamp 0790ccd2ad kite: add deployment strategy 2026-01-23 09:52:40 -05:00
renovate-bot ddcadddaaa chore(deps): update helm release kite to v0.7.8 2026-01-23 12:00:32 +00:00
renovate-bot 8fabc526ad chore(deps): update helm release traefik to v39 2026-01-23 08:00:24 +00:00
renovate-bot e285b581f3 chore(deps): update helm release argo-cd to v9.3.5 2026-01-23 07:00:21 +00:00
renovate-bot 3f614405c8 chore(deps): update helm release kube-prometheus-stack to v81.2.1 2026-01-22 18:00:53 +00:00
williamp 079fdd4da2 rook-ceph: upgrade ceph to v20.2.0-20251104 2026-01-21 11:26:16 -05:00
williamp 6e22223c4b Merge pull request 'chore(deps): update helm release rook-ceph to v1.19.0' (#198) from renovate/rook-ceph-1.x into main
Reviewed-on: #198
2026-01-21 00:54:55 +00:00
williamp 61d5ad7071 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81.2.0' (#196) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #196
2026-01-21 00:45:04 +00:00
williamp cd3f663549 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2026' (#197) from renovate/cloudflare-cloudflared-2026.x into main
Reviewed-on: #197
2026-01-21 00:42:35 +00:00
renovate-bot f1fd7c6cb1 chore(deps): update helm release rook-ceph to v1.19.0 2026-01-20 20:00:57 +00:00
renovate-bot 175b2c13f9 chore(deps): update helm release kube-prometheus-stack to v81.2.0 2026-01-20 19:00:21 +00:00
renovate-bot a643de1085 chore(deps): update cloudflare/cloudflared docker tag to v2026 2026-01-20 12:00:55 +00:00
williamp e5aab6948d Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v81' (#195) from renovate/kube-prometheus-stack-81.x into main
Reviewed-on: #195
2026-01-20 00:08:08 +00:00
renovate-bot 30456b3817 chore(deps): update helm release kube-prometheus-stack to v81 2026-01-19 22:00:23 +00:00
renovate-bot ab12531084 chore(deps): update velero/velero-plugin-for-aws docker tag to v1.13.2 2026-01-16 08:00:27 +00:00
renovate-bot b789b7be21 chore(deps): update helm release kube-prometheus-stack to v80.14.4 2026-01-15 15:00:53 +00:00
renovate-bot 3a2cfdb84e chore(deps): update helm release argo-cd to v9.3.4 2026-01-14 22:00:23 +00:00
williamp 506c034948 argocd: remove unnecessary and annoying prometheus alert 2026-01-14 13:27:50 -05:00
renovate-bot 7cbc80906e chore(deps): update helm release kube-prometheus-stack to v80.14.3 2026-01-14 15:00:53 +00:00
renovate-bot 3fd705520c chore(deps): update helm release kube-prometheus-stack to v80.14.2 2026-01-14 04:00:53 +00:00
renovate-bot 94d65decd1 chore(deps): update helm release argo-cd to v9.3.3 2026-01-13 23:00:55 +00:00
renovate-bot e06a1be194 chore(deps): update helm release kube-prometheus-stack to v80.14.1 2026-01-13 21:01:40 +00:00
renovate-bot dc926c31de chore(deps): update helm release rook-ceph to v1.18.9 2026-01-13 20:00:23 +00:00
williamp af31507e8c Merge branch 'renovate/argo-cd-9.x' 2026-01-13 11:28:24 -05:00
williamp c0ca549393 Merge branch 'renovate/kube-prometheus-stack-80.x' 2026-01-13 11:26:09 -05:00
renovate-bot a113c84c9d chore(deps): update helm release kube-prometheus-stack to v80.14.0 2026-01-13 16:00:44 +00:00
renovate-bot a7cc46ed8a chore(deps): update helm release argo-cd to v9.3.1 2026-01-13 11:00:29 +00:00
renovate-bot 54e6a76aab chore(deps): update helm release kube-prometheus-stack to v80.13.3 2026-01-09 10:00:58 +00:00
renovate-bot 33ef2866e9 chore(deps): update helm release traefik to v38.0.2 2026-01-08 09:00:26 +00:00
renovate-bot b609e87dd3 chore(deps): update helm release kube-prometheus-stack to v80.13.2 2026-01-07 23:00:59 +00:00
renovate-bot e1ffafc161 chore(deps): update helm release kube-prometheus-stack to v80.13.1 2026-01-07 18:00:29 +00:00
williamp 4170dfa26c Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.13.0' (#180) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #180
2026-01-06 22:56:17 +00:00
renovate-bot 5fcb92ee8b chore(deps): update helm release kube-prometheus-stack to v80.13.0 2026-01-06 22:00:55 +00:00
renovate-bot c5acc2416f chore(deps): update helm release velero to v11.3.2 2026-01-06 12:00:56 +00:00
renovate-bot 87b667b2ab chore(deps): update helm release kube-prometheus-stack to v80.11.1 2026-01-06 11:00:55 +00:00
williamp d68d2db3bc Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.11.0' (#177) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #177
2026-01-06 01:46:43 +00:00
williamp ad68a17eb5 Merge pull request 'chore(deps): update helm release kite to v0.7.7' (#176) from renovate/kite-0.x into main
Reviewed-on: #176
2026-01-06 01:46:28 +00:00
renovate-bot b07c7bf3a0 chore(deps): update helm release kube-prometheus-stack to v80.11.0 2026-01-05 15:00:58 +00:00
renovate-bot 78fc45ae6c chore(deps): update helm release kite to v0.7.7 2026-01-05 13:00:27 +00:00
williamp 2fa1594e99 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.10.0' (#173) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #173
2026-01-03 16:53:26 +00:00
renovate-bot b211327516 chore(deps): update helm release external-secrets to v1.2.1 2026-01-02 23:00:55 +00:00
renovate-bot 6885ec790c chore(deps): update helm release argo-cd to v9.2.4 2026-01-02 23:00:27 +00:00
renovate-bot 664cace62e chore(deps): update helm release kube-prometheus-stack to v80.10.0 2026-01-02 18:00:26 +00:00
renovate-bot dae06b2c05 chore(deps): update helm release kube-prometheus-stack to v80.9.2 2025-12-31 11:00:57 +00:00
williamp 583831273d kite: create volume backups 2025-12-31 00:15:20 -05:00
williamp f327b23001 try automerging patches 2025-12-31 00:01:26 -05:00
williamp 6f2603d3a0 remove redundant node alerts 2025-12-30 23:50:18 -05:00
williamp c26ea4e139 Merge pull request 'chore(deps): update helm release velero to v11.3.1' (#170) from renovate/velero-11.x into main
Reviewed-on: #170
2025-12-31 04:44:22 +00:00
williamp b521924f00 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.9.1' (#171) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #171
2025-12-31 04:44:07 +00:00
williamp 19f203e374 re-add weyma-talos-cp04 2025-12-30 23:43:11 -05:00
williamp bb251462fb update omni cluster template 2025-12-30 22:44:13 -05:00
renovate-bot 9a9d108e7c chore(deps): update helm release kube-prometheus-stack to v80.9.1 2025-12-30 19:00:56 +00:00
renovate-bot 70d5ae2e48 chore(deps): update helm release velero to v11.3.1 2025-12-29 11:00:24 +00:00
williamp e6e25baee1 Merge pull request 'chore(deps): update helm release argo-cd to v9.2.3' (#168) from renovate/argo-cd-9.x into main
Reviewed-on: #168
2025-12-29 00:55:53 +00:00
williamp a08e9930d5 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.8.0' (#169) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #169
2025-12-29 00:55:41 +00:00
renovate-bot 94bb98b4ed chore(deps): update helm release argo-cd to v9.2.3 2025-12-28 13:00:26 +00:00
renovate-bot 07f863b0a7 chore(deps): update helm release kube-prometheus-stack to v80.8.0 2025-12-28 01:08:47 +00:00
williamp 79669aaf16 Merge pull request 'chore(deps): update helm release external-secrets to v1.2.0' (#164) from renovate/external-secrets-1.x into main
Reviewed-on: #164
2025-12-23 20:45:46 +00:00
williamp 7237e23151 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.6.0' (#165) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #165
2025-12-23 20:45:33 +00:00
williamp f4cc060de7 Merge pull request 'chore(deps): update helm release argo-cd to v9.2.0' (#166) from renovate/argo-cd-9.x into main
Reviewed-on: #166
2025-12-23 20:44:27 +00:00
williamp 15f5cb1cbc Merge pull request 'chore(deps): update helm release kite to v0.7.6' (#167) from renovate/kite-0.x into main
Reviewed-on: #167
2025-12-23 20:44:16 +00:00
renovate-bot 373b418601 chore(deps): update helm release kite to v0.7.6 2025-12-23 17:03:22 +00:00
renovate-bot 95af55533e chore(deps): update helm release argo-cd to v9.2.0 2025-12-23 12:00:27 +00:00
williamp 24b29cc9a9 consolidate everything into main file, needed for import 2025-12-22 16:49:31 -05:00
williamp ba292377ab derp 2025-12-20 17:19:37 -05:00
williamp 9ee0e419a0 fix dispatcharr route 2025-12-20 17:16:49 -05:00
williamp 0ee35ec27c im tar 2025-12-20 17:13:04 -05:00
williamp 9697736ed3 traefik: add rule to dispatcharr route 2025-12-20 17:10:35 -05:00
williamp 1dea2edfcc traefik: add static route for dispatcharr 2025-12-20 17:00:13 -05:00
renovate-bot ae3d90eb10 chore(deps): update helm release kube-prometheus-stack to v80.6.0 2025-12-19 16:00:23 +00:00
renovate-bot 72e16276b8 chore(deps): update helm release external-secrets to v1.2.0 2025-12-19 15:00:19 +00:00
williamp f1fe246f14 Merge pull request 'chore(deps): update helm release traefik to v38' (#161) from renovate/traefik-38.x into main
Reviewed-on: #161
2025-12-19 14:59:37 +00:00
williamp afe3aaf866 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.5.0' (#163) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #163
2025-12-19 14:58:25 +00:00
williamp 603b6fdbd3 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.9' (#162) from renovate/argo-cd-9.x into main
Reviewed-on: #162
2025-12-19 14:56:05 +00:00
renovate-bot b4fa24c8d1 chore(deps): update helm release traefik to v38 2025-12-19 14:01:27 +00:00
renovate-bot 1c344f11c4 chore(deps): update helm release kube-prometheus-stack to v80.5.0 2025-12-18 16:01:00 +00:00
renovate-bot a9f1a7cf69 chore(deps): update helm release argo-cd to v9.1.9 2025-12-18 13:00:55 +00:00
williamp 335563a895 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.4.2' (#160) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #160
2025-12-17 15:34:06 +00:00
williamp 72df8103f2 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.8' (#159) from renovate/argo-cd-9.x into main
Reviewed-on: #159
2025-12-17 15:33:19 +00:00
renovate-bot 22dbbaf64f chore(deps): update helm release kube-prometheus-stack to v80.4.2 2025-12-17 15:01:09 +00:00
williamp f926df6bea enforce replicas for discord alertmanager agent 2025-12-17 09:19:15 -05:00
renovate-bot ecba2195b6 chore(deps): update helm release argo-cd to v9.1.8 2025-12-16 02:00:20 +00:00
williamp da770facc5 Merge pull request 'create omni cluster template' (#158) from omni-templates into main
Reviewed-on: #158
2025-12-13 22:34:05 +00:00
williamp f0dad1e033 create omni cluster template 2025-12-13 12:48:56 -05:00
williamp ec154c641f Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.4.1' (#157) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #157
2025-12-13 12:00:17 +00:00
renovate-bot f5405bf44d chore(deps): update helm release kube-prometheus-stack to v80.4.1 2025-12-12 23:00:28 +00:00
williamp 1442802804 kite: perma config 2025-12-12 10:23:00 -05:00
williamp 4ca30efa7e kite: add URL 2025-12-12 10:11:26 -05:00
williamp 96ed8909af add kite 2025-12-12 10:06:45 -05:00
williamp dd69b42f26 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.2.2' (#156) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #156
2025-12-12 14:55:15 +00:00
renovate-bot 98c688d4bd chore(deps): update helm release kube-prometheus-stack to v80.2.2 2025-12-12 08:00:20 +00:00
williamp 2bb9cdd402 Merge pull request 'chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.13' (#155) from renovate/ghcr.io-siderolabs-discovery-service-1.x into main
Reviewed-on: #155
2025-12-10 20:30:37 +00:00
williamp c0a9301cad use static manifest since we needed to add '--kubelet-insecure-tls' as an arg 2025-12-10 11:14:56 -05:00
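`--kubelet-insecure-tls` skips kubelet serving-certificate verification and is most commonly needed by metrics-server, so the static manifest here is presumably a metrics-server Deployment (an assumption; the commit does not name the component). A trimmed sketch:

```yaml
# Sketch of a static Deployment carrying the extra argument; the image tag,
# names, and the metrics-server assumption are illustrative only.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - name: metrics-server
          image: registry.k8s.io/metrics-server/metrics-server:v0.7.2   # assumed tag
          args:
            - --kubelet-insecure-tls                           # the flag the commit adds
            - --kubelet-preferred-address-types=InternalIP     # commonly paired; assumed
```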
renovate-bot 2104e6c2ad chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.13 2025-12-10 11:00:24 +00:00
williamp b24a003397 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80.2.0' (#154) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #154
2025-12-10 04:17:57 +00:00
williamp d134b4815e Merge pull request 'chore(deps): update helm release cert-manager to v1.19.2' (#153) from renovate/cert-manager-1.x into main
Reviewed-on: #153
2025-12-10 04:17:36 +00:00
williamp f65b07a8f3 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.7' (#152) from renovate/argo-cd-9.x into main
Reviewed-on: #152
2025-12-10 04:10:10 +00:00
renovate-bot 79e96ae6ea chore(deps): update helm release kube-prometheus-stack to v80.2.0 2025-12-09 18:00:45 +00:00
renovate-bot 3ba9e5e420 chore(deps): update helm release cert-manager to v1.19.2 2025-12-09 17:00:34 +00:00
renovate-bot 03a6abe1b8 chore(deps): update helm release argo-cd to v9.1.7 2025-12-08 23:00:23 +00:00
williamp 97fd5c118e Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v80' (#151) from renovate/kube-prometheus-stack-80.x into main
Reviewed-on: #151
2025-12-07 23:49:01 +00:00
renovate-bot f946189ff6 chore(deps): update helm release kube-prometheus-stack to v80 2025-12-07 21:00:21 +00:00
williamp e12aebefbc spacing is important 2025-12-05 15:05:14 -05:00
williamp d0cf5b9064 argocd: fix syntax in alert 2025-12-05 15:03:04 -05:00
williamp 48e4a6aa7d argocd: fix alert definition with improperly escaped var 2025-12-05 14:59:21 -05:00

williamp 25b1e757be Merge pull request 'chore(deps): update helm release external-secrets to v1.1.1' (#150) from renovate/external-secrets-1.x into main
Reviewed-on: #150
2025-12-05 19:53:58 +00:00
williamp d6d9c402cf Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.12.0' (#149) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #149
2025-12-05 19:53:21 +00:00
renovate-bot 15fedb055f chore(deps): update helm release external-secrets to v1.1.1 2025-12-05 18:00:19 +00:00
renovate-bot 0c22389ac4 chore(deps): update helm release kube-prometheus-stack to v79.12.0 2025-12-05 16:00:25 +00:00
williamp eba1779962 Merge pull request 'chore(deps): update helm release metallb to v0.15.3' (#148) from renovate/metallb-0.x into main
Reviewed-on: #148
2025-12-05 15:03:58 +00:00
williamp 0a3ced36c7 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.6' (#147) from renovate/argo-cd-9.x into main
Reviewed-on: #147
2025-12-05 15:03:35 +00:00
renovate-bot 7f84573f4c chore(deps): update helm release metallb to v0.15.3 2025-12-04 16:00:36 +00:00
renovate-bot 15544a2186 chore(deps): update helm release argo-cd to v9.1.6 2025-12-04 04:00:20 +00:00
williamp eda6ba5f28 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.8' (#146) from renovate/rook-ceph-1.x into main
Reviewed-on: #146
2025-12-03 16:42:02 +00:00
williamp c672a5cd22 multus: raise CPU limits 2025-12-03 09:02:00 -05:00
williamp 0fd46e4b75 multus: swap values around, oops 2025-12-03 08:59:24 -05:00
williamp caca1c7ec5 multus: increase resource limits to prevent node crashes 2025-12-03 08:56:29 -05:00
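The three commits above raise the Multus DaemonSet's resource limits after nodes crashed under the defaults (with one fix-up for swapped values). Only the mechanism, a `resources` block on the DaemonSet container, is implied by the commits; the numbers below are placeholders.

```yaml
# Illustrative resources block for the Multus DaemonSet container;
# the actual limits chosen in the repo are not recorded in this log.
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m      # raised from the default (assumed values)
    memory: 256Mi
```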
renovate-bot d328f7ff37 chore(deps): update helm release rook-ceph to v1.18.8 2025-12-02 19:00:18 +00:00
williamp 3afb27bb3d Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.11.0' (#145) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #145
2025-12-02 16:01:04 +00:00
williamp e7775aa9dc monitoring: additional alert for excessive warnings 2025-12-02 10:57:46 -05:00
renovate-bot e7cfab1232 chore(deps): update helm release kube-prometheus-stack to v79.11.0 2025-12-02 15:00:21 +00:00
williamp 659aa5f1aa fix again 2025-12-02 09:58:15 -05:00
williamp bab5bbb30b fix alertmanager config 2025-12-02 09:53:14 -05:00
williamp ec292a6973 a 2025-12-02 09:46:25 -05:00
williamp 43c6ca185c temporarily add warning to alert just to finally get a working test lmao 2025-12-02 09:15:07 -05:00
williamp 927d4e6905 rm old static prometheus container 2025-12-02 09:09:38 -05:00
williamp dfaa4e2961 traefik: make TraefikDown alert critical 2025-12-02 09:07:27 -05:00
williamp 1716e524a3 edit alertmanager 2025-12-01 21:18:22 -05:00
williamp 6df782ae99 add null route 2025-12-01 21:14:26 -05:00
williamp 6f3ce4f03d monitoring: match critical severity 2025-12-01 21:13:34 -05:00
williamp bc539ce7c3 fix argo sync in externalsecret 2025-12-01 20:39:02 -05:00
williamp adea620df2 add null receiver 2025-12-01 20:32:09 -05:00
williamp 9371f50404 monitoring: configure alertmanager to use discord 2025-12-01 20:18:27 -05:00
williamp 2a0e5004f0 monitoring: add discord username 2025-12-01 20:13:32 -05:00
williamp e7fb07343c monitoring: enable discord notifications 2025-12-01 20:10:03 -05:00
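The run of commits above wires Alertmanager to Discord: a `discord` receiver driven by a webhook (stored in the `discord-webhook` ExternalSecret mentioned further up), a `null` receiver and route to swallow noise, and routing on `severity: critical`. A condensed sketch of how that might look under the kube-prometheus-stack `alertmanager.config` values key; receiver names, the webhook wiring, and the example matcher are assumptions.

```yaml
alertmanager:
  config:
    route:
      receiver: discord
      routes:
        - receiver: "null"               # swallow alerts that should never page
          matchers:
            - alertname = "Watchdog"     # assumed example
        - receiver: discord
          matchers:
            - severity = "critical"      # per the "match critical severity" commit
    receivers:
      - name: "null"                     # receiver with no integrations
      - name: discord
        discord_configs:
          - webhook_url: https://discord.com/api/webhooks/placeholder   # real value comes from the ExternalSecret (assumed wiring)
            username: Alertmanager        # per the "add discord username" commit; field support depends on Alertmanager version
```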
williamp ae8f0e560a Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.10.0' (#143) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #143
2025-12-01 20:51:07 +00:00
renovate-bot 4c87356828 chore(deps): update helm release kube-prometheus-stack to v79.10.0 2025-12-01 20:00:20 +00:00
williamp cc858dd8f3 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.5' (#142) from renovate/argo-cd-9.x into main
Reviewed-on: #142
2025-11-30 20:28:29 +00:00
renovate-bot 5d71a0f199 chore(deps): update helm release argo-cd to v9.1.5 2025-11-30 20:00:21 +00:00
williamp a4d2f870d9 rm guestbook (the argo testing/example app) 2025-11-29 19:06:39 -05:00
williamp 7136a0f322 velero: add alert rules 2025-11-29 18:45:28 -05:00
williamp c2d6c0c8bb velero: enable metrics and prometheusrule 2025-11-29 18:38:43 -05:00
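These two commits turn on Velero's metrics endpoint and ship a PrometheusRule with backup alerts. A sketch of the Helm values involved, assuming the upstream vmware-tanzu chart layout; the alert rule itself is illustrative, not the repo's exact one.

```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
  prometheusRule:
    enabled: true
    spec:
      - alert: VeleroBackupFailures            # illustrative rule
        expr: increase(velero_backup_failure_total{schedule!=""}[1h]) > 0
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: Velero backups are failing
```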
williamp f3c3741409 Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.9.0' (#141) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #141
2025-11-29 00:36:23 +00:00
renovate-bot 80b7cb2282 chore(deps): update helm release kube-prometheus-stack to v79.9.0 2025-11-28 17:00:22 +00:00
williamp bf66dd0818 Merge pull request 'chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.12' (#140) from renovate/ghcr.io-siderolabs-discovery-service-1.x into main
Reviewed-on: #140
2025-11-28 16:08:02 +00:00
williamp eea1c80a27 rook-ceph: rm CephNodeDiskspaceWarning due to improper, non-ceph related alerts 2025-11-28 10:40:41 -05:00
renovate-bot 612dd16d4b chore(deps): update ghcr.io/siderolabs/discovery-service docker tag to v1.0.12 2025-11-28 15:00:19 +00:00
williamp 341b402f0e Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.8.2' (#139) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #139
2025-11-28 01:08:18 +00:00
renovate-bot 76eaa1dd98 chore(deps): update helm release kube-prometheus-stack to v79.8.2 2025-11-26 02:00:19 +00:00
williamp a730f43cbd Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.8.1' (#138) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #138
2025-11-26 00:18:05 +00:00
renovate-bot 4bd23be552 chore(deps): update helm release kube-prometheus-stack to v79.8.1 2025-11-25 21:00:19 +00:00
williamp 6cd4b20970 metallb: no rbacPrometheus 2025-11-24 22:45:00 -05:00
williamp c3c66cb9e3 metallb: fix values 2025-11-24 22:43:52 -05:00
williamp b0fb79f7ea traefik: fix prometheusrule 2025-11-24 22:39:00 -05:00
williamp 624c5c7a8c traefik: enable monitoring 2025-11-24 22:33:08 -05:00
williamp ebf8f25342 metallb: enable prometheusrules and servicemonitors 2025-11-24 22:31:16 -05:00
williamp 87c5d94e0d external-secrets: enable monitoring 2025-11-24 22:29:47 -05:00
williamp b9a8f3fea8 cert-manager: enable monitoring 2025-11-24 22:24:30 -05:00
williamp 6634531a37 rook-ceph: add prometheusrules 2025-11-24 22:09:36 -05:00
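This cluster of commits enables Prometheus integration (ServiceMonitors and PrometheusRules) across several charts: traefik, metallb, external-secrets, cert-manager, and rook-ceph. The values keys differ per chart; the fragment below sketches two of the more standard ones and should be read as an approximation rather than the repo's actual values files.

```yaml
# cert-manager values (sketch)
prometheus:
  enabled: true
  servicemonitor:
    enabled: true

# external-secrets values (sketch; key names assumed)
serviceMonitor:
  enabled: true
  interval: 30s
```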
williamp fb9a0e4015 whereabouts: increase daemonset cpu limits 2025-11-24 15:11:45 -05:00
williamp 3c424ddc30 argocd: fix prometheusrule 2025-11-24 11:46:13 -05:00
williamp a14e4c8aad how about now 2025-11-24 11:37:44 -05:00
williamp c9f7587c8f argocd: correct syntax for metrics 2025-11-24 11:18:55 -05:00
williamp 0f61b0cd03 argocd: enable metrics 2025-11-24 11:13:22 -05:00
williamp 6d9e1a5395 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.4' (#136) from renovate/argo-cd-9.x into main
Reviewed-on: #136
2025-11-24 16:05:41 +00:00
williamp 29db286734 Merge pull request 'chore(deps): update helm release kubernetes-replicator to v2.12.2' (#137) from renovate/kubernetes-replicator-2.x into main
Reviewed-on: #137
2025-11-24 16:05:23 +00:00
renovate-bot 2af2f3deb5 chore(deps): update helm release kubernetes-replicator to v2.12.2 2025-11-24 11:00:31 +00:00
renovate-bot 0a9c30943d chore(deps): update helm release argo-cd to v9.1.4 2025-11-23 14:00:18 +00:00
williamp 0691a9e6bd Merge pull request 'chore(deps): update helm release external-secrets to v1.1.0' (#134) from renovate/external-secrets-1.x into main
Reviewed-on: #134
2025-11-21 18:43:05 +00:00
williamp 6f609c831b Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v79.7.1' (#135) from renovate/kube-prometheus-stack-79.x into main
Reviewed-on: #135
2025-11-21 18:42:55 +00:00
williamp 96b0eb2fb6 rook-ceph: expand monitoring templates 2025-11-21 13:32:44 -05:00
renovate-bot 9f60be9bc7 chore(deps): update helm release kube-prometheus-stack to v79.7.1 2025-11-21 16:00:53 +00:00
williamp 9d8b955fd4 Merge pull request 'chore(deps): update helm release traefik to v37.4.0' (#133) from renovate/traefik-37.x into main
Reviewed-on: #133
2025-11-21 15:19:48 +00:00
williamp 95f100b56c monitoring: i see the issue now 2025-11-21 10:07:42 -05:00
williamp 298797c285 monitoring: aaaa 2025-11-21 10:02:33 -05:00
renovate-bot 4dab964987 chore(deps): update helm release external-secrets to v1.1.0 2025-11-21 15:00:53 +00:00
williamp f0a0be8b5a monitoring: don't use helm selectors 2025-11-21 10:00:13 -05:00
williamp fdb0421c4e rook-ceph: create servicemonitor chart template 2025-11-21 09:44:43 -05:00
renovate-bot 3e178c0d95 chore(deps): update helm release traefik to v37.4.0 2025-11-21 08:00:19 +00:00
williamp 24df3974ae rook-ceph: clean invalid values file, enable monitoring 2025-11-20 21:25:09 -05:00
williamp 4f2e13808f setting doesnt seem to work 2025-11-20 21:22:56 -05:00
williamp 4f92db2434 rook-ceph: enable integrated cluster monitoring 2025-11-20 20:41:03 -05:00
williamp 16d3b2a66d rook-ceph: enable integrated monitoring 2025-11-20 20:32:32 -05:00
williamp eede7fce5f config patch to allow kube-system svcs to be scraped 2025-11-20 20:31:10 -05:00
williamp f75a9a6d81 monitoring: use cert-manager 2025-11-20 16:02:51 -05:00
williamp ea07fb74c2 fix storage spec key 2025-11-20 15:55:39 -05:00
williamp ca7d2d9a96 monitoring: add storageclasses 2025-11-20 15:49:00 -05:00
williamp 40459d297e monitoring: replace prometheus agent manifests with prometheus stack chart 2025-11-20 15:36:43 -05:00
williamp 4e270f7cef monitoring: disable prometheus agent to make way for integrated prometheus 2025-11-20 09:01:04 -05:00
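The commits above retire the hand-rolled Prometheus agent manifests in favour of the kube-prometheus-stack chart, switch its admission certificates to cert-manager, and give Prometheus persistent storage. The "fix storage spec key" commit points at the usual gotcha: storage is configured under `prometheusSpec.storageSpec`, not a top-level key. A sketch of those values; the storage class name and size are assumptions.

```yaml
prometheus:
  prometheusSpec:
    storageSpec:
      volumeClaimTemplate:
        spec:
          storageClassName: ceph-block     # assumed; the cluster runs rook-ceph
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 50Gi                # assumed size
```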
williamp 1bd3f85cef Merge pull request 'chore(deps): update helm release velero to v11.2.0' (#132) from renovate/velero-11.x into main
Reviewed-on: #132
2025-11-19 19:54:27 +00:00
renovate-bot ee4db8f776 chore(deps): update helm release velero to v11.2.0 2025-11-19 12:00:29 +00:00
williamp 132446d6a0 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.7' (#130) from renovate/rook-ceph-1.x into main
Reviewed-on: #130
2025-11-14 15:22:59 +00:00
williamp 63c029fe78 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.3' (#131) from renovate/argo-cd-9.x into main
Reviewed-on: #131
2025-11-14 15:22:31 +00:00
renovate-bot fe9c2fc388 chore(deps): update helm release argo-cd to v9.1.3 2025-11-14 11:00:36 +00:00
renovate-bot 68296177e1 chore(deps): update helm release rook-ceph to v1.18.7 2025-11-13 21:00:19 +00:00
williamp 6db62946a5 argocd: enable health checks 2025-11-13 09:52:34 -05:00
williamp c0882a9b71 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.2' (#129) from renovate/argo-cd-9.x into main
Reviewed-on: #129
2025-11-13 14:45:02 +00:00
renovate-bot 800f0ac5f4 chore(deps): update helm release argo-cd to v9.1.2 2025-11-13 14:00:17 +00:00
williamp 51d7a2d88e whereabouts: pin version on v0.9.2 2025-11-12 19:34:43 -05:00
williamp 1f98fa3f59 prometheus-agent: upgrade to 3.7.3 2025-11-12 19:33:26 -05:00
williamp 9e9aef6425 kubevirt: upgrade to 1.6.3 2025-11-12 19:32:42 -05:00
williamp 3f7b0c10ce multus: use pinned version instead of general 'thick' image 2025-11-12 14:43:55 -05:00
williamp cbf8f44e78 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.1' (#128) from renovate/argo-cd-9.x into main
Reviewed-on: #128
2025-11-12 19:29:20 +00:00
renovate-bot caab50f652 chore(deps): update helm release argo-cd to v9.1.1 2025-11-11 18:57:33 +00:00
williamp ab0baa3a4a metallb: rm image specs from values, should be defaults from chart 2025-11-10 10:05:26 -05:00
williamp 4fc589203a Merge pull request 'chore(deps): update quay.io/frrouting/frr docker tag to v10.5.0' (#127) from renovate/quay.io-frrouting-frr-10.x into main
Reviewed-on: #127
2025-11-10 15:02:20 +00:00
williamp ed04eec1ad Merge pull request 'chore(deps): update helm release traefik to v37.3.0' (#126) from renovate/traefik-37.x into main
Reviewed-on: #126
2025-11-10 15:02:04 +00:00
renovate-bot 668f893d4f chore(deps): update quay.io/frrouting/frr docker tag to v10.5.0 2025-11-10 15:00:24 +00:00
renovate-bot 3426ad838e chore(deps): update helm release traefik to v37.3.0 2025-11-10 11:00:30 +00:00
williamp d01ef24c43 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.11.1' (#125) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #125
2025-11-08 15:51:21 +00:00
renovate-bot d8c0321e4a chore(deps): update cloudflare/cloudflared docker tag to v2025.11.1 2025-11-07 18:00:19 +00:00
williamp 497669d048 Merge pull request 'chore(deps): update helm release external-secrets to v1' (#124) from renovate/external-secrets-1.x into main
Reviewed-on: #124
2025-11-07 16:00:30 +00:00
williamp b38b094da8 Merge pull request 'chore(deps): update velero/velero-plugin-for-aws docker tag to v1.13.1' (#123) from renovate/velero-velero-plugin-for-aws-1.x into main
Reviewed-on: #123
2025-11-07 16:00:21 +00:00
renovate-bot 273659d6c7 chore(deps): update helm release external-secrets to v1 2025-11-07 13:00:22 +00:00
renovate-bot f9c17ad471 chore(deps): update velero/velero-plugin-for-aws docker tag to v1.13.1 2025-11-07 10:00:20 +00:00
williamp df5f9cd891 Merge pull request 'chore(deps): update helm release argo-cd to v9.1.0' (#122) from renovate/argo-cd-9.x into main
Reviewed-on: #122
2025-11-05 01:44:08 +00:00
renovate-bot f9853be26f chore(deps): update helm release argo-cd to v9.1.0 2025-11-04 22:00:22 +00:00
williamp 672974d1a3 Merge pull request 'chore(deps): update helm release kubernetes-replicator to v2.12.1' (#120) from renovate/kubernetes-replicator-2.x into main
Reviewed-on: #120
2025-11-04 00:45:50 +00:00
williamp 8b24ca740d Merge pull request 'chore(deps): update helm release argo-cd to v9.0.6' (#121) from renovate/argo-cd-9.x into main
Reviewed-on: #121
2025-11-04 00:45:20 +00:00
renovate-bot 3d2aa36c78 chore(deps): update helm release argo-cd to v9.0.6 2025-11-04 00:00:29 +00:00
renovate-bot 5fa662be9b chore(deps): update helm release kubernetes-replicator to v2.12.1 2025-11-03 11:00:33 +00:00
williamp b39a86c9f0 Merge pull request 'chore(deps): update prom/prometheus docker tag to v3.7.3' (#118) from renovate/prom-prometheus-3.x into main
Reviewed-on: #118
2025-10-31 00:28:55 +00:00
williamp f53018c65a Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.10.1' (#119) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #119
2025-10-31 00:26:14 +00:00
renovate-bot 25093d9c8f chore(deps): update cloudflare/cloudflared docker tag to v2025.10.1 2025-10-30 19:00:24 +00:00
renovate-bot 891a1353d1 chore(deps): update prom/prometheus docker tag to v3.7.3 2025-10-30 08:00:25 +00:00
williamp 02805a9e43 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.6' (#117) from renovate/rook-ceph-1.x into main
Reviewed-on: #117
2025-10-28 17:50:56 +00:00
renovate-bot e0643dd642 chore(deps): update helm release rook-ceph to v1.18.6 2025-10-28 16:00:28 +00:00
williamp 19a5c3320d Merge pull request 'chore(deps): update helm release external-secrets to v0.20.4' (#116) from renovate/external-secrets-0.x into main
Reviewed-on: #116
2025-10-24 19:36:29 +00:00
williamp e15579079f Merge pull request 'chore(deps): update helm release argo-cd to v9.0.5' (#115) from renovate/argo-cd-9.x into main
Reviewed-on: #115
2025-10-24 19:36:16 +00:00
renovate-bot 63468a1787 chore(deps): update helm release external-secrets to v0.20.4 2025-10-24 16:00:36 +00:00
renovate-bot 2d537f5e43 chore(deps): update helm release argo-cd to v9.0.5 2025-10-24 12:00:22 +00:00
williamp 427b201613 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.5' (#114) from renovate/rook-ceph-1.x into main
Reviewed-on: #114
2025-10-23 23:46:56 +00:00
williamp 169ad887f0 Merge pull request 'chore(deps): update hashicorp/vault docker tag to v1.21' (#113) from renovate/hashicorp-vault-1.x into main
Reviewed-on: #113
2025-10-23 23:46:20 +00:00
renovate-bot 46fa2af0e4 chore(deps): update helm release rook-ceph to v1.18.5 2025-10-23 19:00:27 +00:00
renovate-bot 7dc522de0a chore(deps): update hashicorp/vault docker tag to v1.21 2025-10-22 21:00:26 +00:00
williamp e9bebb4cfc Merge pull request 'chore(deps): update helm release traefik to v37.2.0' (#111) from renovate/traefik-37.x into main
Reviewed-on: #111
2025-10-22 14:35:55 +00:00
williamp ab029c189b Merge pull request 'chore(deps): update prom/prometheus docker tag to v3.7.2' (#112) from renovate/prom-prometheus-3.x into main
Reviewed-on: #112
2025-10-22 14:31:19 +00:00
renovate-bot 5e3ca29d1e chore(deps): update prom/prometheus docker tag to v3.7.2 2025-10-22 14:00:19 +00:00
renovate-bot e700051562 chore(deps): update helm release traefik to v37.2.0 2025-10-22 08:00:24 +00:00
williamp 0f9a889ec1 Merge pull request 'chore(deps): update helm release argo-cd to v9.0.3' (#110) from renovate/argo-cd-9.x into main
Reviewed-on: #110
2025-10-20 13:06:59 +00:00
renovate-bot c085aba38b chore(deps): update helm release argo-cd to v9.0.3 2025-10-20 13:00:59 +00:00
williamp e00e6de9fa Merge pull request 'chore(deps): update helm release argo-cd to v9' (#109) from renovate/argo-cd-9.x into main
Reviewed-on: #109
2025-10-18 14:52:23 +00:00
renovate-bot 8f53ff1b21 chore(deps): update helm release argo-cd to v9 2025-10-18 11:00:26 +00:00
williamp e9cb97f0fa Merge pull request 'chore(deps): update prom/prometheus docker tag to v3.7.1' (#108) from renovate/prom-prometheus-3.x into main
Reviewed-on: #108
2025-10-17 14:38:14 +00:00
williamp f1cc6423a7 Merge pull request 'chore(deps): update helm release argo-cd to v8.6.4' (#106) from renovate/argo-cd-8.x into main
Reviewed-on: #106
2025-10-17 14:36:41 +00:00
williamp 7fb4f27974 Merge pull request 'chore(deps): update helm release velero to v11.1.1' (#107) from renovate/velero-11.x into main
Reviewed-on: #107
2025-10-17 14:36:20 +00:00
renovate-bot 62f917b721 chore(deps): update prom/prometheus docker tag to v3.7.1 2025-10-17 09:00:39 +00:00
renovate-bot d073c71f3b chore(deps): update helm release velero to v11.1.1 2025-10-17 09:00:34 +00:00
renovate-bot 41cdf4edad chore(deps): update helm release argo-cd to v8.6.4 2025-10-17 07:00:23 +00:00
williamp eeaf01adc6 Merge pull request 'chore(deps): update prom/prometheus docker tag to v3.7.0' (#104) from renovate/prom-prometheus-3.x into main
Reviewed-on: #104
2025-10-15 17:24:33 +00:00
williamp 0eea239d71 Merge pull request 'chore(deps): update helm release cert-manager to v1.19.1' (#105) from renovate/cert-manager-1.x into main
Reviewed-on: #105
2025-10-15 17:23:59 +00:00
williamp bdb637029d Merge pull request 'chore(deps): update helm release argo-cd to v8.6.3' (#103) from renovate/argo-cd-8.x into main
Reviewed-on: #103
2025-10-15 17:23:29 +00:00
renovate-bot 33758df99d chore(deps): update helm release cert-manager to v1.19.1 2025-10-15 16:01:15 +00:00
renovate-bot 062176378d chore(deps): update prom/prometheus docker tag to v3.7.0 2025-10-15 11:00:47 +00:00
renovate-bot 726bf4c97b chore(deps): update helm release argo-cd to v8.6.3 2025-10-15 11:00:23 +00:00
williamp 8aa197f222 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.10.0' (#102) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #102
2025-10-15 01:56:54 +00:00
williamp 0ad67c8d82 Merge pull request 'chore(deps): update helm release external-secrets to v0.20.3' (#101) from renovate/external-secrets-0.x into main
Reviewed-on: #101
2025-10-15 01:56:40 +00:00
renovate-bot 92c47457ae chore(deps): update cloudflare/cloudflared docker tag to v2025.10.0 2025-10-14 20:00:32 +00:00
renovate-bot 9d6f260b5a chore(deps): update helm release external-secrets to v0.20.3 2025-10-14 07:00:32 +00:00
williamp bc6e715459 Merge pull request 'chore(deps): update helm release argo-cd to v8.6.1' (#100) from renovate/argo-cd-8.x into main
Reviewed-on: #100
2025-10-13 23:27:49 +00:00
renovate-bot de969bef57 chore(deps): update helm release argo-cd to v8.6.1 2025-10-13 21:00:21 +00:00
williamp 9efc9a4cd4 Merge pull request 'chore(deps): update helm release velero to v11.1.0' (#99) from renovate/velero-11.x into main
Reviewed-on: #99
2025-10-13 14:28:14 +00:00
renovate-bot 27b2bc2f34 chore(deps): update helm release velero to v11.1.0 2025-10-13 14:00:33 +00:00
williamp 3e994a33b6 Merge pull request 'chore(deps): update helm release argo-cd to v8.6.0' (#98) from renovate/argo-cd-8.x into main
Reviewed-on: #98
2025-10-12 04:35:45 +00:00
renovate-bot 80a4befb77 chore(deps): update helm release argo-cd to v8.6.0 2025-10-09 23:00:32 +00:00
williamp c79a1b24d1 Merge pull request 'chore(deps): update helm release argo-cd to v8.5.10' (#97) from renovate/argo-cd-8.x into main
Reviewed-on: #97
2025-10-08 13:00:36 +00:00
renovate-bot 145d1eca8f chore(deps): update helm release argo-cd to v8.5.10 2025-10-08 11:00:22 +00:00
williamp b1643ab8ea Merge pull request 'chore(deps): update helm release argo-cd to v8.5.9' (#95) from renovate/argo-cd-8.x into main
Reviewed-on: #95
2025-10-07 18:19:42 +00:00
williamp b1138202be Merge pull request 'chore(deps): update helm release cert-manager to v1.19.0' (#96) from renovate/cert-manager-1.x into main
Reviewed-on: #96
2025-10-07 18:17:16 +00:00
renovate-bot 1d50ebe17d chore(deps): update helm release cert-manager to v1.19.0 2025-10-07 17:00:30 +00:00
renovate-bot 9b9f97fac1 chore(deps): update helm release argo-cd to v8.5.9 2025-10-07 09:00:20 +00:00
williamp ab6a65ddd1 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.4' (#94) from renovate/rook-ceph-1.x into main
Reviewed-on: #94
2025-10-06 22:54:32 +00:00
renovate-bot 9869e9fc01 chore(deps): update helm release rook-ceph to v1.18.4 2025-10-06 19:00:21 +00:00
williamp f2fbedb0fb argocd: add gitea known hosts 2025-10-06 12:45:27 -04:00
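Adding the Gitea host key lets the Argo CD repo-server verify SSH connections to git.dubyatp.xyz. With the argo-cd Helm chart this is typically appended via `configs.ssh.extraHosts`; the fragment below is a sketch and the key material is a placeholder.

```yaml
configs:
  ssh:
    extraHosts: |
      # placeholder; the real entry comes from `ssh-keyscan git.dubyatp.xyz`
      git.dubyatp.xyz ssh-ed25519 AAAA...placeholder
```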
williamp 4cee8124e1 add argocd to system-apps 2025-10-06 10:47:28 -04:00
williamp 9ba11b4be9 allow resizing block PVs 2025-10-05 01:29:39 -04:00
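"Allow resizing block PVs" most plausibly means setting `allowVolumeExpansion` on the rook-ceph block StorageClass, so existing PVCs can be grown in place. A sketch, with the class name and parameters assumed:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-block                       # assumed name
provisioner: rook-ceph.rbd.csi.ceph.com  # rook-ceph RBD CSI provisioner
allowVolumeExpansion: true               # the change this commit makes
reclaimPolicy: Delete
parameters:
  clusterID: rook-ceph                   # assumed
  pool: replicapool                      # assumed
```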
williamp f272dd9bc3 add .vscode to gitignore 2025-10-04 09:10:37 -04:00
williamp e422d1e313 add ceph versioning to renovate checks 2025-10-04 09:09:43 -04:00
williamp 730b56d47d Merge pull request 'chore(deps): update helm release traefik to v37.1.2' (#92) from renovate/traefik-37.x into main
Reviewed-on: #92
2025-10-03 18:17:03 +00:00
williamp d3b46ebb48 Merge pull request 'chore(deps): update helm release external-secrets to v0.20.2' (#93) from renovate/external-secrets-0.x into main
Reviewed-on: #93
2025-10-03 18:16:46 +00:00
renovate-bot ec4b4577f1 chore(deps): update helm release external-secrets to v0.20.2 2025-10-03 12:00:20 +00:00
renovate-bot 57ce27076e chore(deps): update helm release traefik to v37.1.2 2025-10-03 10:00:18 +00:00
williamp 86ac5b9682 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.3' (#91) from renovate/rook-ceph-1.x into main
Reviewed-on: #91
2025-10-02 19:26:32 +00:00
williamp aeb7ed8d58 rook-ceph: use image/tag values from default rather than pinning them in our values.yaml 2025-10-02 15:26:11 -04:00
renovate-bot b1570120e0 chore(deps): update helm release rook-ceph to v1.18.3 2025-10-02 18:00:29 +00:00
williamp 0b1ca020bb add kubevirt 2025-10-01 08:56:00 -04:00
williamp 84def46f8b add kubevirt network to weyma-talos-testw05 2025-09-26 14:32:19 -04:00
williamp 5b101dd9b7 add weyma-talos-w03 to kubevirt vlan 2025-09-26 14:17:33 -04:00
williamp af9ea52245 rook-ceph: allow expanding weyma-shared volumes 2025-09-25 17:42:58 -04:00
williamp ee5f6dce5c update weyma-talos-testw02 and weyma-talos-w02 for kubevirt bridge 2025-09-25 13:28:23 -04:00
williamp 709efb5f0d rm static address assignment for br0 on weyma-talos-testw04 2025-09-25 13:17:50 -04:00
williamp 2eb7d4bb4e update kubevirt NAD for production 2025-09-25 13:12:43 -04:00
williamp 517634f34f update kubevirt NAD 2025-09-25 13:01:18 -04:00
williamp fae6191aac add whereabouts 2025-09-25 12:47:16 -04:00
williamp 4d5469594d multus: rm dhcp installer 2025-09-25 12:40:55 -04:00
williamp bee5e777fa debug logic for dhcp install 2025-09-25 11:00:45 -04:00
williamp 512e5270e0 fix issue in dhcp installer 2025-09-25 10:58:03 -04:00
williamp 643716a68e multus: patch daemonset to install dhcp client 2025-09-25 10:52:22 -04:00
williamp 4fbd67f486 multus: use DHCP for kubevirt NAD 2025-09-25 10:11:18 -04:00
williamp b35bfd987a experimental vlan config for weyma-talos-testw04 2025-09-24 16:01:38 -04:00
williamp 047ea00faa update nads again 2025-09-24 15:50:30 -04:00
williamp e4fd532dc2 multus: update nad 2025-09-24 15:47:01 -04:00
williamp 45d5924600 Merge pull request 'chore(deps): update helm release velero to v11' (#89) from renovate/velero-11.x into main
Reviewed-on: #89
2025-09-24 19:17:16 +00:00
williamp 6814dbe6dd more nad test 2025-09-24 10:41:52 -04:00
williamp 8fe5581f80 oops 2025-09-24 10:33:39 -04:00
williamp 3fb2e8507c test NAD setting 2025-09-24 10:32:31 -04:00
williamp 86de849114 update nad 2025-09-24 10:22:26 -04:00
williamp 28ca8fbb8c multus: fix ipam 2025-09-24 10:17:58 -04:00
williamp d64feb7228 would help if i actually gave it the right file ext 2025-09-24 10:07:26 -04:00
williamp e474c621fd multus: add kubevirt NAD 2025-09-24 10:01:29 -04:00
williamp 0a1a7b7a79 add multus 2025-09-24 09:29:23 -04:00
renovate-bot 8ae0c72b3a chore(deps): update helm release velero to v11 2025-09-24 12:00:26 +00:00
williamp 26823926f4 rook-ceph: reenable pdbs 2025-09-23 20:11:17 -04:00
williamp 5f76e1889c rook-ceph: temporarily disable PDB while cluster reconciles 2025-09-23 20:05:42 -04:00
williamp fa3875cded Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.9.1' (#88) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #88
2025-09-22 14:14:36 +00:00
williamp c15126ae2d Merge pull request 'chore(deps): update helm release external-secrets to v0.20.1' (#87) from renovate/external-secrets-0.x into main
Reviewed-on: #87
2025-09-22 14:10:45 +00:00
renovate-bot 30789a4ecd chore(deps): update cloudflare/cloudflared docker tag to v2025.9.1 2025-09-22 14:00:24 +00:00
renovate-bot 83ae71f7aa chore(deps): update helm release external-secrets to v0.20.1 2025-09-22 13:00:20 +00:00
williamp e9dfc7462c Merge pull request 'chore(deps): update prom/prometheus docker tag to v3.6.0' (#86) from renovate/prom-prometheus-3.x into main
Reviewed-on: #86
2025-09-22 01:56:44 +00:00
renovate-bot f808a60431 chore(deps): update prom/prometheus docker tag to v3.6.0 2025-09-21 21:00:20 +00:00
williamp 92c13f5fba Merge pull request 'chore(deps): update helm release velero to v10.1.3' (#85) from renovate/velero-10.x into main
Reviewed-on: #85
2025-09-20 02:15:21 +00:00
renovate-bot 23adb75ce4 chore(deps): update helm release velero to v10.1.3 2025-09-19 10:00:21 +00:00
williamp aaa88273c6 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.9.0' (#84) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #84
2025-09-18 13:05:54 +00:00
renovate-bot 3a9e0255b2 chore(deps): update cloudflare/cloudflared docker tag to v2025.9.0 2025-09-18 11:00:22 +00:00
williamp 815a559cb7 patch velero to use kubectl 1.33.4 until fixed upstream 2025-09-15 18:27:08 -04:00
williamp 20648809df Merge pull request 'chore(deps): update velero/velero-plugin-for-aws docker tag to v1.13.0' (#83) from renovate/velero-velero-plugin-for-aws-1.x into main
Reviewed-on: #83
2025-09-15 14:23:30 +00:00
renovate-bot 80be757153 chore(deps): update velero/velero-plugin-for-aws docker tag to v1.13.0 2025-09-15 10:00:25 +00:00
williamp 5b08954a84 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.2' (#82) from renovate/rook-ceph-1.x into main
Reviewed-on: #82
2025-09-10 22:44:36 +00:00
williamp 5d2e0225d9 Merge pull request 'chore(deps): update docker.io/rook/ceph docker tag to v1.18.2' (#81) from renovate/docker.io-rook-ceph-1.x into main
Reviewed-on: #81
2025-09-10 22:44:18 +00:00
renovate-bot 9b044cdebd chore(deps): update helm release rook-ceph to v1.18.2 2025-09-10 22:00:23 +00:00
renovate-bot 908d5e71d7 chore(deps): update docker.io/rook/ceph docker tag to v1.18.2 2025-09-10 22:00:20 +00:00
williamp 2341107e5f Merge pull request 'chore(deps): update helm release velero to v10.1.2' (#79) from renovate/velero-10.x into main
Reviewed-on: #79
2025-09-10 14:30:37 +00:00
williamp bb12dd3809 Merge pull request 'chore(deps): update helm release traefik to v37.1.1' (#80) from renovate/traefik-37.x into main
Reviewed-on: #80
2025-09-10 14:30:30 +00:00
renovate-bot 8144db9440 chore(deps): update helm release traefik to v37.1.1 2025-09-10 08:00:28 +00:00
renovate-bot cb483841de chore(deps): update helm release velero to v10.1.2 2025-09-10 05:00:19 +00:00
williamp 79d0d1d6f7 Merge pull request 'chore(deps): update helm release traefik to v37.1.0' (#78) from renovate/traefik-37.x into main
Reviewed-on: #78
2025-09-03 13:02:01 +00:00
renovate-bot 1467a9f7fe chore(deps): update helm release traefik to v37.1.0 2025-09-03 13:00:20 +00:00
williamp 28beb6f2be Merge pull request 'chore(deps): update helm release velero to v10.1.1' (#77) from renovate/velero-10.x into main
Reviewed-on: #77
2025-09-01 14:21:05 +00:00
renovate-bot 75b14a3854 chore(deps): update helm release velero to v10.1.1 2025-09-01 11:00:47 +00:00
williamp d8e24e5c77 Merge pull request 'chore(deps): update docker.io/rook/ceph docker tag to v1.18.1' (#75) from renovate/docker.io-rook-ceph-1.x into main
Reviewed-on: #75
2025-08-31 20:57:36 +00:00
williamp ee05ccfc05 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.1' (#76) from renovate/rook-ceph-1.x into main
Reviewed-on: #76
2025-08-31 20:57:25 +00:00
renovate-bot 19ac4bd46c chore(deps): update helm release rook-ceph to v1.18.1 2025-08-29 18:00:53 +00:00
renovate-bot 66b86e63cf chore(deps): update docker.io/rook/ceph docker tag to v1.18.1 2025-08-29 18:00:50 +00:00
williamp 7e9970645a rook-ceph: remove erasure coding 2025-08-24 16:36:46 -04:00
williamp 84b9efb5d5 remove adv in ceph object store 2025-08-24 16:33:54 -04:00
williamp 3dc7f6c0d1 fix cephobjectstore 2025-08-24 16:31:40 -04:00
williamp 6d9431ed6b rook-ceph: add s3 storageclass 2025-08-23 01:23:03 -04:00
williamp 4c64f79377 prepare s3 for https 2025-08-22 19:17:58 -04:00
williamp ec81a2b4c2 rook-ceph: enable ingress support for s3 2025-08-22 19:04:23 -04:00
williamp 73c50486d6 rook-ceph: add weyma-s3 object storage 2025-08-22 18:57:21 -04:00
williamp 62ac73c79e rook-ceph: update to ceph v19.2.3 2025-08-22 18:45:07 -04:00
williamp bf4fb0b800 Merge pull request 'chore(deps): update cloudflare/cloudflared docker tag to v2025.8.1' (#74) from renovate/cloudflare-cloudflared-2025.x into main
Reviewed-on: #74
2025-08-22 11:16:19 +00:00
renovate-bot c4dd583299 chore(deps): update cloudflare/cloudflared docker tag to v2025.8.1 2025-08-21 17:00:22 +00:00
williamp d617742e77 Merge pull request 'chore(deps): update helm release rook-ceph to v1.18.0' (#73) from renovate/rook-ceph-1.x into main
Reviewed-on: #73
2025-08-21 14:56:10 +00:00
williamp bdbbb67b3e Merge pull request 'chore(deps): update docker.io/rook/ceph docker tag to v1.18.0' (#72) from renovate/docker.io-rook-ceph-1.x into main
Reviewed-on: #72
2025-08-21 14:56:06 +00:00
renovate-bot 0283aaa84c chore(deps): update helm release rook-ceph to v1.18.0 2025-08-20 21:00:27 +00:00
renovate-bot 0e8ace8ea3 chore(deps): update docker.io/rook/ceph docker tag to v1.18.0 2025-08-20 21:00:25 +00:00
76 changed files with 11290 additions and 1562 deletions
+1
@@ -1 +1,2 @@
.vscode/
test/
+37
@@ -0,0 +1,37 @@
# Main Infrastructure: weyma-talos
**Production Kubernetes infrastructure with disaster recovery capabilities**
This repository contains the foundational infrastructure for my Kubernetes homelab, designed with reliability and rapid recovery as core principles.
## Architecture
My infrastructure follows a layered "black start" approach - essential services run outside the Kubernetes cluster to enable cluster bootstrapping and recovery from total failures.
### Black Start Layer
Static services (Docker Compose on TrueNAS/Proxmox) that provide cluster dependencies:
- Image cache for faster deployments and offline capability
- Talos discovery server for node bootstrapping
- HashiCorp Vault for secrets management (external to cluster)
- Future: Self-hosted Sidero Omni server (migrating from SaaS)
### System Apps Layer
Applications running within Kubernetes that provide core cluster functionality, managed via ArgoCD with GitOps principles.
## Repository Structure
- **`black-start/`** - Docker Compose services for cluster dependencies
- **`config-patches/`** - Talos Linux configuration patches for cluster and individual machines
- **`omni/`** - Sidero Omni [cluster template](https://docs.siderolabs.com/omni/reference/cluster-templates)
- **`system-apps/`** - System applications (ArgoCD projects) - monitoring, ingress, certificates, storage
## Tech Stack
**OS:** Talos Linux | **Orchestration:** Kubernetes | **GitOps:** ArgoCD | **Secrets:** Vault | **Storage:** Rook-Ceph
## Recovery Process
The "black start" architecture enables ~15-20 minute automated recovery from complete infrastructure failure:
1. Start black-start services → 2. Bootstrap Talos → 3. Deploy system apps → 4. Deploy core apps
For application deployments, see [core-apps](https://git.dubyatp.xyz/core-apps).
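The black-start image cache described above is what the Talos registry mirror patches later in this compare point at (endpoints on 10.105.6.215:6000-6004). As a minimal sketch only, with assumed service names and an assumed pull-through-cache setup (the real compose file under `black-start/` is not part of this diff), the docker.io mirror on port 6000 could be served like this:

```yaml
# Hypothetical black-start image cache. Only the host/port 10.105.6.215:6000
# is confirmed by the registry mirror patch shown later in this compare;
# the service name, volume path, and registry image tag are assumptions.
services:
  docker-io-cache:
    image: registry:2
    restart: unless-stopped
    environment:
      # Run the registry as a pull-through cache for Docker Hub.
      REGISTRY_PROXY_REMOTEURL: "https://registry-1.docker.io"
    ports:
      - 10.105.6.215:6000:5000
    volumes:
      - ./registry-data:/var/lib/registry
```

Under that assumption, each upstream registry (registry.k8s.io, gcr.io, ghcr.io, factory.talos.dev) would get its own cache on the neighbouring ports, matching the mirror endpoints declared in the cluster template further down.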
@@ -1,15 +0,0 @@
services:
prometheus:
image: prom/prometheus:v3.5.0
command:
- '--config.file=/etc/prometheus/prometheus.yaml'
- '--web.config.file=/etc/prometheus/web-config.yaml'
- '--web.enable-remote-write-receiver'
- '--storage.tsdb.retention.size=35GB'
ports:
- 9090:9090
volumes:
- ./.basicauthpass:/etc/prometheus/.basicauthpass
- ./prometheus.yaml:/etc/prometheus/prometheus.yaml
- ./web-config.yaml:/etc/prometheus/web-config.yaml
- /mnt/prometheus-data:/prometheus
@@ -1,32 +0,0 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:9090"]
basic_auth:
username: prometheus
password_file: /etc/prometheus/.basicauthpass
@@ -1,2 +0,0 @@
basic_auth_users:
prometheus: <redacted>
@@ -2,7 +2,7 @@ version: "3.8"
services:
discovery:
restart: unless-stopped
image: ghcr.io/siderolabs/discovery-service:v1.0.11
image: ghcr.io/siderolabs/discovery-service:v1.0.17
ports:
- 10.105.6.215:3000:3000
- 10.105.6.215:3001:3001
@@ -5,7 +5,7 @@ services:
command: tunnel run weyma-vault
env_file: ".env"
vault:
image: hashicorp/vault:1.20
image: hashicorp/vault:2.0
env_file: ".env.vault"
environment:
VAULT_ADDR: "https://weyma-vault.infra.dubyatp.xyz:8200"
@@ -0,0 +1,10 @@
cluster:
controllerManager:
extraArgs:
bind-address: "0.0.0.0"
proxy:
extraArgs:
metrics-bind-address: "0.0.0.0:10249"
scheduler:
extraArgs:
bind-address: "0.0.0.0"
@@ -1,8 +1,213 @@
cluster:
extraManifests:
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
inlineManifests:
- name: metrics-server
contents: |-
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=10250
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
- name: metrics-lb
contents: |-
apiVersion: v1
@@ -10,6 +215,8 @@ cluster:
metadata:
name: metrics-lb
namespace: kube-system
annotations:
metallb.io/ip-allocated-from-pool: test-pool
spec:
type: LoadBalancer
ports:
@@ -19,4 +226,3 @@ cluster:
targetPort: https
selector:
k8s-app: metrics-server
@@ -3,8 +3,17 @@ machine:
interfaces:
- deviceSelector:
hardwareAddr: "e8:ff:1e:d5:f8:22"
driver: "igc"
dhcp: true
vlans:
- vlanId: 50
dhcp: false
- deviceSelector:
hardwareAddr: "e8:ff:1e:d5:f8:21"
mtu: 9000
dhcp: true
- interface: br0
dhcp: false
bridge:
interfaces:
- enp2s0.50
@@ -3,8 +3,17 @@ machine:
interfaces:
- deviceSelector:
hardwareAddr: "f4:52:14:60:5e:30"
driver: "mlx4_core"
dhcp: true
vlans:
- vlanId: 50
dhcp: false
- deviceSelector:
hardwareAddr: "f4:52:14:60:5e:31"
dhcp: true
mtu: 9000
- interface: br0
dhcp: false
bridge:
interfaces:
- eno1.50
@@ -8,3 +8,11 @@ machine:
hardwareAddr: "00:16:3e:e5:79:0a"
dhcp: true
mtu: 9000
- deviceSelector:
hardwareAddr: "00:16:3e:6b:1c:1d"
dhcp: false
- interface: br0
dhcp: false
bridge:
interfaces:
- enx00163e6b1c1d
@@ -3,8 +3,17 @@ machine:
interfaces:
- deviceSelector:
hardwareAddr: "e8:ff:1e:d4:b8:89"
driver: "igc"
dhcp: true
vlans:
- vlanId: 50
dhcp: false
- deviceSelector:
hardwareAddr: "e8:ff:1e:d4:b8:8a"
mtu: 9000
dhcp: true
- interface: br0
dhcp: false
bridge:
interfaces:
- enp1s0.50
@@ -8,5 +8,14 @@ machine:
- deviceSelector:
hardwareAddr: "bc:24:11:f8:4a:92"
driver: "virtio*"
dhcp: true
mtu: 8996
dhcp: true
- deviceSelector:
hardwareAddr: "bc:24:11:93:02:0e"
driver: "virtio*"
dhcp: false
- interface: br0
dhcp: false
bridge:
interfaces:
- enxbc241193020e
+532
@@ -0,0 +1,532 @@
kind: Cluster
name: weyma-talos
kubernetes:
version: v1.34.2
talos:
version: v1.11.5
features:
backupConfiguration:
interval: 6h0m0s
patches:
- idOverride: 500-5100c0c3-f72e-45f5-8cde-4a1c3b6f72a8
annotations:
description: pod-svc-subnets
name: User defined patch
inline:
cluster:
network:
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.112.0.0/12
- idOverride: 500-7c228773-8b44-40b0-8b4c-30f617668af0
annotations:
description: weyma-image-cache
name: User defined patch
inline:
machine:
registries:
mirrors:
docker.io:
endpoints:
- http://10.105.6.215:6000
factory.talos.dev:
endpoints:
- http://10.105.6.215:6004
gcr.io:
endpoints:
- http://10.105.6.215:6002
ghcr.io:
endpoints:
- http://10.105.6.215:6003
registry.k8s.io:
endpoints:
- http://10.105.6.215:6001
- idOverride: 500-f198cacc-280b-4874-a410-252c160621a7
annotations:
name: weyma-bind-addr
inline:
cluster:
controllerManager:
extraArgs:
bind-address: 0.0.0.0
proxy:
extraArgs:
proxy-mode: ipvs
metrics-bind-address: 0.0.0.0:10249
scheduler:
extraArgs:
bind-address: 0.0.0.0
- idOverride: 500-fc113705-0777-4b52-8df0-7cee67fcc68e
annotations:
name: weyma-bootstrap-metrics
inline:
cluster:
extraManifests:
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
inlineManifests:
- contents: |-
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=10250
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
name: metrics-server
- contents: |-
apiVersion: v1
kind: Service
metadata:
name: metrics-lb
namespace: kube-system
annotations:
metallb.io/ip-allocated-from-pool: test-pool
spec:
type: LoadBalancer
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
name: metrics-lb
- contents: |-
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
log . {
class error
}
prometheus :9153
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
rewrite name git.dubyatp.xyz traefik-local.traefik.svc.cluster.local
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30 {
disable success cluster.local
disable denial cluster.local
}
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
name: coredns-config
---
kind: ControlPlane
machines:
- 20b4c826-e699-43b3-826d-73eb5173680b
- 5fdea709-56ad-45f2-966d-5e344dbe4fdf
- 30303031-3030-3030-6335-303731636665
---
kind: Workers
machines:
- 02c02200-f403-11ef-9372-70f446672600
- 03000200-0400-0500-0006-000700080009
- 1006b91a-ecbf-11ea-aed4-046ba1ee3700
- 5f0cd701-0784-4fcc-8e52-3b3304049972
- da507021-8912-4337-86a3-94a05bd1cf05
---
kind: Machine
name: 02c02200-f403-11ef-9372-70f446672600
patches:
- idOverride: 400-cm-02c02200-f403-11ef-9372-70f446672600
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-w02
interfaces:
- deviceSelector:
driver: igc
hardwareAddr: e8:ff:1e:d4:b8:89
dhcp: true
vlans:
- dhcp: false
vlanId: 50
- deviceSelector:
hardwareAddr: e8:ff:1e:d4:b8:8a
dhcp: true
mtu: 9000
- bridge:
interfaces:
- enp1s0.50
dhcp: false
interface: br0
---
kind: Machine
name: 03000200-0400-0500-0006-000700080009
patches:
- idOverride: 400-cm-03000200-0400-0500-0006-000700080009
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-testw01
interfaces:
- deviceSelector:
driver: igc
hardwareAddr: e8:ff:1e:d5:f8:22
dhcp: true
vlans:
- dhcp: false
vlanId: 50
- deviceSelector:
hardwareAddr: e8:ff:1e:d5:f8:21
dhcp: true
mtu: 9000
- bridge:
interfaces:
- enp2s0.50
dhcp: false
interface: br0
---
kind: Machine
name: 1006b91a-ecbf-11ea-aed4-046ba1ee3700
patches:
- idOverride: 400-cm-1006b91a-ecbf-11ea-aed4-046ba1ee3700
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-testw04
interfaces:
- deviceSelector:
driver: mlx4_core
hardwareAddr: f4:52:14:60:5e:30
dhcp: true
vlans:
- dhcp: false
vlanId: 50
- deviceSelector:
hardwareAddr: f4:52:14:60:5e:31
dhcp: true
mtu: 9000
- bridge:
interfaces:
- eno1.50
dhcp: false
interface: br0
---
kind: Machine
name: 30303031-3030-3030-6335-303731636665
patches:
- idOverride: 400-cm-30303031-3030-3030-6335-303731636665
inline:
machine:
network:
hostname: weyma-talos-cp04
interfaces:
- deviceSelector:
hardwareAddr: dc:a6:32:95:0f:cb
dhcp: true
---
kind: Machine
name: 20b4c826-e699-43b3-826d-73eb5173680b
patches:
- idOverride: 400-cm-20b4c826-e699-43b3-826d-73eb5173680b
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-cp02
interfaces:
- deviceSelector:
driver: virtio*
hardwareAddr: 00:16:3e:9c:01:27
dhcp: true
---
kind: Machine
name: 5f0cd701-0784-4fcc-8e52-3b3304049972
patches:
- idOverride: 400-cm-5f0cd701-0784-4fcc-8e52-3b3304049972
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-testw05
interfaces:
- deviceSelector:
hardwareAddr: 00:16:3e:b3:dd:f8
dhcp: true
- deviceSelector:
hardwareAddr: 00:16:3e:e5:79:0a
dhcp: true
mtu: 9000
- deviceSelector:
hardwareAddr: 00:16:3e:6b:1c:1d
dhcp: false
- bridge:
interfaces:
- enx00163e6b1c1d
dhcp: false
interface: br0
---
kind: Machine
systemExtensions:
- siderolabs/nut-client
- siderolabs/qemu-guest-agent
name: 5fdea709-56ad-45f2-966d-5e344dbe4fdf
patches:
- idOverride: 400-cm-5fdea709-56ad-45f2-966d-5e344dbe4fdf
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-cp01
interfaces:
- deviceSelector:
driver: virtio*
hardwareAddr: bc:24:11:e6:ff:7b
dhcp: true
---
kind: Machine
name: da507021-8912-4337-86a3-94a05bd1cf05
patches:
- idOverride: 400-cm-da507021-8912-4337-86a3-94a05bd1cf05
annotations:
name: ""
inline:
machine:
network:
hostname: weyma-talos-w03
interfaces:
- deviceSelector:
driver: virtio*
hardwareAddr: bc:24:11:be:6c:08
dhcp: true
- deviceSelector:
driver: virtio*
hardwareAddr: bc:24:11:f8:4a:92
dhcp: true
mtu: 8996
- deviceSelector:
driver: virtio*
hardwareAddr: bc:24:11:93:02:0e
dhcp: false
- bridge:
interfaces:
- enxbc241193020e
dhcp: false
interface: br0
+35 -1
@@ -1,3 +1,37 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"customManagers": [
{
"customType": "regex",
"description": "Update Ceph version in Rook CephCluster spec",
"managerFilePatterns": ["/(^|/)rook-ceph-cluster\\.ya?ml$/"],
"matchStrings": [
"image:\\s*[\"']?(?<depName>quay\\.io/ceph/ceph):v(?<currentValue>\\d+\\.\\d+\\.\\d+)(?:-\\d+)?[\"']?"
],
"datasourceTemplate": "docker",
"versioningTemplate": "loose",
"extractVersionTemplate": "^v?(?<version>\\d+\\.\\d+\\.\\d+)"
}
],
"packageRules": [
{
"description": "Consolidate patch and minor updates to one PR",
"matchUpdateTypes": ["minor", "patch"],
"groupName": "all-minor-patch-updates"
},
{
"description": "Rook Ceph - auto-update minor and patch versions only",
"matchDatasources": ["docker"],
"matchPackageNames": ["quay.io/ceph/ceph"],
"matchUpdateTypes": ["minor", "patch"],
"enabled": true
},
{
"description": "Rook Ceph - block major version upgrades",
"matchDatasources": ["docker"],
"matchPackageNames": ["quay.io/ceph/ceph"],
"matchUpdateTypes": ["major"],
"enabled": false
}
]
}
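The regex custom manager added above is what lets Renovate track the Ceph image pinned inside the Rook cluster spec, and the two package rules beneath it permit minor/patch bumps of that image while blocking majors. As a hedged illustration (the actual `rook-ceph-cluster.yaml` is not included in this compare, so the surrounding keys are assumed), the manager is written to match a line shaped like this:

```yaml
# Hypothetical excerpt of the Rook cluster values. Only the image line format
# and the v19.2.3 version appear elsewhere in this history; the enclosing
# cephClusterSpec/cephVersion keys are assumptions.
cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v19.2.3
```

Renovate would extract `quay.io/ceph/ceph` as the dependency name and `19.2.3` as the current version (per the `depName` and `currentValue` capture groups), then query the docker datasource for newer tags.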
+28
@@ -0,0 +1,28 @@
apiVersion: v2
name: argocd
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
dependencies:
- name: argo-cd
version: 9.5.4
repository: https://argoproj.github.io/argo-helm
+318
@@ -0,0 +1,318 @@
argo-cd:
global:
domain: argocd.infra.dubyatp.xyz
configs:
cm:
admin.enabled: false
dex.config: |
connectors:
- config:
issuer: https://auth.dubyatp.xyz/application/o/argocd/
clientID: ZZ4Rt3ZixVu9ote8yzryHFrEhlbY85C24Hh9Uo98
clientSecret: $weyma-argocd-secrets:dex.authentik.clientSecret
insecureEnableGroups: true
scopes:
- openid
- profile
- email
name: authentik
type: oidc
id: authentik
resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration: |
jsonPointers:
- /webhooks/0/clientConfig/caBundle
resource.customizations.ignoreDifferences.admissionregistration.k8s.io_ValidatingWebhookConfiguration: |
jsonPointers:
- /webhooks/0/clientConfig/caBundle
resource.customizations.ignoreDifferences.Secret: |
jsonPointers:
- /data
params:
server.insecure: true
ssh:
extraHosts: |
git.dubyatp.xyz ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/9ygk32Ibk6/ZqIhwh0ZyTTDpdXxP/BUgtJI4FVLKVWIEFnB+fOCTsSXM/mt8R0Xld/AJ+muywNhZc60nEAg+Pj4yxc0u75t1Ea+C8JjEh2xW7rH+oZLv+JabcLk5Ze7rpaETkq2ILxNmBKemgDut8mXt9BYBo5mk72ClWBsijFWw8Vj8LnWfzw/VsFFlQ4CJvnLuTqw+bgI5VlodwR20wcEHuSuUKY9IA2hyDZLWJ2vZzIlI8TuY21Qc8vFmEVB7M1mgqLJKksdi/ZLHk5UN9HTRz0Q6SaNyvPfjWCeHX8Tb3WnsAnHvnXc0C3A2EWVFqHIpJwVRTC2ef6LCUmZmPv1NqnFWftv192n+oOJqT+537fNesK7tQJfX4Osi5RDCL788GjJHLOarzEIKegpunCjq/9yp/Oi6M/v+/eN7rdd/UY80mcmoOC1HOVPfjxmCfcFFpqKX3NYlx/czF+gpf0mRBaHEpkGk3oPrqGiZbSAShsLDvptZANmBoBSDFwFwJpHxdMzMOLM8NyQNewKs1pYbjklbuC5W33qjgHdVk56jnGVPwCVak/TQgoOI9NtxnfvfV6sB5mQWEkiNsUzEVK3hgu5Wa93vN/DZ75KoS95Ldj4pCfJV92eeeYWvrRPAIdnzxjH2rdfhysHW2NYFdl7PlAqcca+CaO4WOHOMJw==
git-ssh.dubyatp.xyz ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/9ygk32Ibk6/ZqIhwh0ZyTTDpdXxP/BUgtJI4FVLKVWIEFnB+fOCTsSXM/mt8R0Xld/AJ+muywNhZc60nEAg+Pj4yxc0u75t1Ea+C8JjEh2xW7rH+oZLv+JabcLk5Ze7rpaETkq2ILxNmBKemgDut8mXt9BYBo5mk72ClWBsijFWw8Vj8LnWfzw/VsFFlQ4CJvnLuTqw+bgI5VlodwR20wcEHuSuUKY9IA2hyDZLWJ2vZzIlI8TuY21Qc8vFmEVB7M1mgqLJKksdi/ZLHk5UN9HTRz0Q6SaNyvPfjWCeHX8Tb3WnsAnHvnXc0C3A2EWVFqHIpJwVRTC2ef6LCUmZmPv1NqnFWftv192n+oOJqT+537fNesK7tQJfX4Osi5RDCL788GjJHLOarzEIKegpunCjq/9yp/Oi6M/v+/eN7rdd/UY80mcmoOC1HOVPfjxmCfcFFpqKX3NYlx/czF+gpf0mRBaHEpkGk3oPrqGiZbSAShsLDvptZANmBoBSDFwFwJpHxdMzMOLM8NyQNewKs1pYbjklbuC5W33qjgHdVk56jnGVPwCVak/TQgoOI9NtxnfvfV6sB5mQWEkiNsUzEVK3hgu5Wa93vN/DZ75KoS95Ldj4pCfJV92eeeYWvrRPAIdnzxjH2rdfhysHW2NYFdl7PlAqcca+CaO4WOHOMJw==
rbac:
policy.csv: |
g, ArgoCD Admins, role:admin
controller:
metrics:
enabled: true
serviceMonitor:
enabled: true
rules:
enabled: true
spec:
- alert: ArgoAppMissing
expr: |
absent(argocd_app_info) == 1
for: 15m
labels:
severity: critical
annotations:
summary: "[Argo CD] No reported applications"
description: >
Argo CD has not reported any applications data for the past 15 minutes which
means that it must be down or not functioning properly. This needs to be
resolved for this cloud to continue to maintain state.
server:
ingress:
enabled: true
livenessProbe:
enabled: true
readinessProbe:
enabled: true
metrics:
enabled: true
serviceMonitor:
enabled: true
repoServer:
livenessProbe:
enabled: true
readinessProbe:
enabled: true
metrics:
enabled: true
serviceMonitor:
enabled: true
applicationSet:
livenessProbe:
enabled: true
readinessProbe:
enabled: true
metrics:
enabled: true
serviceMonitor:
enabled: true
redis:
livenessProbe:
enabled: true
readinessProbe:
enabled: true
metrics:
enabled: true
serviceMonitor:
enabled: true
dex:
livenessProbe:
enabled: true
readinessProbe:
enabled: true
metrics:
enabled: true
serviceMonitor:
enabled: true
notifications:
metrics:
enabled: true
serviceMonitor:
enabled: true
extraObjects:
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: weyma-argocd-secrets
labels:
app.kubernetes.io/part-of: argocd
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: weyma-argocd-secrets
creationPolicy: Owner
data:
- secretKey: webhook.gitea.secret
remoteRef:
key: argo-cd
property: webhook.gitea.secret
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: admin.password
remoteRef:
key: argo-cd
property: admin.password
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: admin.passwordMtime
remoteRef:
key: argo-cd
property: admin.passwordMtime
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: dex.authentik.clientSecret
remoteRef:
key: argo-cd
property: dex.authentik.clientSecret
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: git-core-apps
labels:
app.kubernetes.io/part-of: argocd
argocd.argoproj.io/secret-type: repository
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: git-core-apps
creationPolicy: Owner
data:
- secretKey: sshPrivateKey
remoteRef:
key: argo-cd-git
property: sshPrivateKey
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: type
remoteRef:
key: argo-cd-git
property: type
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: url
remoteRef:
key: argo-cd-git
property: url.core-apps
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: git-weyma-talos
labels:
app.kubernetes.io/part-of: argocd
argocd.argoproj.io/secret-type: repository
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: git-weyma-talos
creationPolicy: Owner
data:
- secretKey: sshPrivateKey
remoteRef:
key: argo-cd-git
property: sshPrivateKey
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: type
remoteRef:
key: argo-cd-git
property: type
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: url
remoteRef:
key: argo-cd-git
property: url.weyma-talos
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: git-williamp-sites
labels:
app.kubernetes.io/part-of: argocd
argocd.argoproj.io/secret-type: repository
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: git-williamp-sites
creationPolicy: Owner
data:
- secretKey: sshPrivateKey
remoteRef:
key: argo-cd-git
property: sshPrivateKey
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: type
remoteRef:
key: argo-cd-git
property: type
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: url
remoteRef:
key: argo-cd-git
property: url.williamp-sites
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: git-db-operators
labels:
app.kubernetes.io/part-of: argocd
argocd.argoproj.io/secret-type: repository
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: git-db-operators
creationPolicy: Owner
data:
- secretKey: sshPrivateKey
remoteRef:
key: argo-cd-git
property: sshPrivateKey
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: type
remoteRef:
key: argo-cd-git
property: type
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
- secretKey: url
remoteRef:
key: argo-cd-git
property: url.db-operators
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: cert-manager
version: v1.18.2
version: v1.20.2
repository: https://charts.jetstack.io
@@ -9,6 +9,7 @@ spec:
- dubyatp.xyz
- '*.dubyatp.xyz'
- '*.infra.dubyatp.xyz'
- "*.weyma-s3.infra.dubyatp.xyz"
issuerRef:
kind: ClusterIssuer
name: letsencrypt-dubyatp-xyz
+4
@@ -9,3 +9,7 @@ cert-manager:
cainjector:
serviceLabels:
metrics_enabled: "true"
prometheus:
enabled: true
servicemonitor:
enabled: true
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: external-secrets
version: 0.19.2
version: 2.3.0
repository: https://charts.external-secrets.io
@@ -171,7 +171,7 @@ resources: {}
serviceMonitor:
# -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics
enabled: false
enabled: true
# -- namespace where you want to install ServiceMonitors
namespace: ""
@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guestbook-ui
namespace: guestbook-ui
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: guestbook-ui
template:
metadata:
labels:
app: guestbook-ui
spec:
containers:
- image: gcr.io/heptio-images/ks-guestbook-demo:0.2
name: guestbook-ui
ports:
- containerPort: 80
@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: guestbook-ui
@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: guestbook-ui
namespace: guestbook-ui
spec:
ports:
- port: 80
targetPort: 80
selector:
app: guestbook-ui
+28
@@ -0,0 +1,28 @@
apiVersion: v2
name: kite
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
dependencies:
- name: kite
version: 0.10.0
repository: https://zxh326.github.io/kite
+30
@@ -0,0 +1,30 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kite-secret
namespace: {{ .Release.Namespace }}
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: kite-secret
creationPolicy: Owner
data:
- secretKey: JWT_SECRET
remoteRef:
key: kite
property: JWT_SECRET
- secretKey: KITE_ENCRYPT_KEY
remoteRef:
key: kite
property: KITE_ENCRYPT_KEY
- secretKey: KITE_PASSWORD
remoteRef:
key: kite
property: KITE_PASSWORD
- secretKey: KITE_USERNAME
remoteRef:
key: kite
property: KITE_USERNAME
+22
@@ -0,0 +1,22 @@
kite:
host: "https://weyma-kite.infra.dubyatp.xyz"
deploymentStrategy:
type: Recreate
secret:
create: false
existingSecret: kite-secret
db:
sqlite:
persistence:
pvc:
enabled: true
ingress:
enabled: true
className: "traefik"
hosts:
- host: weyma-kite.infra.dubyatp.xyz
paths:
- path: /
pathType: ImplementationSpecific
podAnnotations:
backup.velero.io/backup-volumes: kite-storage
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: kubernetes-replicator
version: 2.12.0
version: 2.12.3
repository: https://helm.mittwald.de
+14
@@ -0,0 +1,14 @@
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
annotations:
kubevirt.io/latest-observed-api-version: v1
kubevirt.io/storage-observed-api-version: v1
name: kubevirt
namespace: kubevirt
spec:
configuration:
developerConfiguration:
featureGates:
- MultiArchitecture
imagePullPolicy: IfNotPresent
File diff suppressed because it is too large
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: metallb
version: 0.15.2
version: 0.15.3
repository: https://metallb.github.io/metallb
+3 -361
@@ -1,365 +1,7 @@
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# To configure MetalLB, you must specify ONE of the following two
# options.
rbac:
# create specifies whether to install and use RBAC rules.
create: true
metallb:
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self signed
# certificate to be used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self signed
# certificate to be used.
controllerMetricsTLSSecret: ""
# prometheus doesn't have the permission to scrape all namespaces so we give it permission to scrape metallb's one
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
rbacPrometheus: false
podMonitor:
# enable support for Prometheus Operator
enabled: false
# optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
enabled: true
prometheusRule:
# enable alertmanager alerts
enabled: false
# optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: critical
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: critical
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: critical
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag: v0.15.2
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
tlsMinVersion: "VersionTLS12"
tlsCipherSuites: ""
extraContainers: []
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
enabled: true
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlBindAddrOverride: ""
mlSecretKeyPath: "/etc/ml_secret_key"
excludeInterfaces:
enabled: true
# ignore the exclude-from-external-loadbalancer label
ignoreExcludeLB: false
image:
repository: quay.io/metallb/speaker
tag: v0.15.2
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: false
image:
repository: quay.io/frrouting/frr
tag: 10.4.1
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
extraContainers: []
crds:
enabled: true
validationFailurePolicy: Fail
# frrk8s contains the configuration related to using an frrk8s instance
# (github.com/metallb/frr-k8s) as the backend for the BGP implementation.
# This allows configuring additional frr parameters in combination to those
# applied by MetalLB.
frrk8s:
# if set, enables frrk8s as a backend. This is mutually exclusive to frr
# mode.
enabled: false
external: false
namespace: ""
+28
@@ -0,0 +1,28 @@
apiVersion: v2
name: kube-prometheus-stack
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
dependencies:
- name: kube-prometheus-stack
version: 83.7.0
repository: https://prometheus-community.github.io/helm-charts
-21
@@ -1,21 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-agent
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
-108
@@ -1,108 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prom-agent-config
namespace: monitoring
data:
prometheus.yml: |
global:
scrape_interval: 15s
scrape_configs:
- job_name: 'weyma-talos-nodes-kubelet'
kubernetes_sd_configs:
- role: node
scheme: https
tls_config:
insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- action: labeldrop
regex: cpu_feature_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_model_migration_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_model_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_timer_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_vendor_node_kubevirt_io_.+
- action: labeldrop
regex: host_model_cpu_node_kubevirt_io_.+
- action: labeldrop
regex: host_model_required_features_node_kubevirt_io_.+
- action: labeldrop
regex: hyperv_node_kubevirt_io_.+
- job_name: 'weyma-talos-nodes-metrics'
kubernetes_sd_configs:
- role: node
scheme: https
tls_config:
insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__address__]
regex: (.+):\d+
target_label: __address__
replacement: ${1}:9100
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- action: labeldrop
regex: cpu_feature_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_model_migration_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_model_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_timer_node_kubevirt_io_.+
- action: labeldrop
regex: cpu_vendor_node_kubevirt_io_.+
- action: labeldrop
regex: host_model_cpu_node_kubevirt_io_.+
- action: labeldrop
regex: host_model_required_features_node_kubevirt_io_.+
- action: labeldrop
regex: hyperv_node_kubevirt_io_.+
- job_name: 'weyma-talos-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_service_label_metrics_enabled]
regex: true
action: keep
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: service
- job_name: 'weyma-talos-rook'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_service_name]
regex: ^rook-ceph-(exporter|mgr)$
action: keep
- source_labels: [__address__]
regex: ^[^:]+:(9283|9926)$
action: keep
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: service
remote_write:
- url: "https://10.105.15.20:30104/api/v1/write"
basic_auth:
username: prometheus
password_file: /etc/prometheus/secrets/.basicauthpass
tls_config:
insecure_skip_verify: true
-41
@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus-agent
spec:
replicas: 1
selector:
matchLabels:
app: prometheus-agent
template:
metadata:
labels:
app: prometheus-agent
spec:
serviceAccountName: prometheus-agent
containers:
- name: prometheus
image: prom/prometheus:v3.2.1
args:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--agent"
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 500m
memory: 1Gi
volumeMounts:
- name: config-volume
mountPath: /etc/prometheus
- name: auth
mountPath: /etc/prometheus/secrets
volumes:
- name: config-volume
configMap:
name: prom-agent-config
- name: auth
secret:
secretName: prometheus-auth
@@ -1,22 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: node-exporter
subjects:
- kind: ServiceAccount
name: node-exporter
namespace: monitoring
@@ -1,121 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
namespace: monitoring
spec:
selector:
matchLabels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: node-exporter
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
spec:
automountServiceAccountToken: true
containers:
- args:
- --web.listen-address=127.0.0.1:9100
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --path.udev.data=/host/root/run/udev/data
- --no-collector.wifi
- --no-collector.hwmon
- --no-collector.btrfs
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
- --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$
- --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$
image: quay.io/prometheus/node-exporter:v1.9.1
name: node-exporter
resources:
limits:
cpu: 250m
memory: 180Mi
requests:
cpu: 102m
memory: 180Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- SYS_TIME
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /host/sys
mountPropagation: HostToContainer
name: sys
readOnly: true
- mountPath: /host/root
mountPropagation: HostToContainer
name: root
readOnly: true
- args:
- --secure-listen-address=[$(IP)]:9100
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --upstream=http://127.0.0.1:9100/
env:
- name: IP
valueFrom:
fieldRef:
fieldPath: status.podIP
image: quay.io/brancz/kube-rbac-proxy:v0.19.1
name: kube-rbac-proxy
ports:
- containerPort: 9100
hostPort: 9100
name: https
resources:
limits:
cpu: 20m
memory: 40Mi
requests:
cpu: 10m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
hostNetwork: true
hostPID: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
securityContext:
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: node-exporter
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /sys
name: sys
- hostPath:
path: /
name: root
updateStrategy:
rollingUpdate:
maxUnavailable: 10%
type: RollingUpdate
@@ -1,29 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
namespace: monitoring
spec:
egress:
- {}
ingress:
- from:
- podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
ports:
- port: 9100
protocol: TCP
podSelector:
matchLabels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
policyTypes:
- Egress
- Ingress
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
namespace: monitoring
spec:
ports:
- name: https
port: 9100
targetPort: https
selector:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
@@ -1,11 +0,0 @@
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.9.1
name: node-exporter
namespace: monitoring
-17
@@ -1,17 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: prometheus-auth
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: prometheus-auth
creationPolicy: Owner
data:
- secretKey: .basicauthpass
remoteRef:
key: monitoring
property: prometheus-password
@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-agent
namespace: monitoring
@@ -0,0 +1,22 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: discord-webhook
namespace: {{ .Release.Namespace }}
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: discord-webhook
creationPolicy: Owner
data:
- secretKey: webhook
remoteRef:
conversionStrategy: Default
decodingStrategy: None
metadataPolicy: None
nullBytePolicy: Ignore
key: monitoring
property: discord_webhook
@@ -0,0 +1,67 @@
{{- if .Values.discord.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: alertmanager-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: alertmanager-discord
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
selector:
matchLabels:
app: alertmanager-discord
template:
metadata:
labels:
app: alertmanager-discord
spec:
containers:
- name: alertmanager-discord
image: {{ .Values.discord.image | default "ghcr.io/rogerrum/alertmanager-discord:1.0.7" }}
ports:
- containerPort: 9094
env:
- name: LISTEN_ADDRESS
value: "0.0.0.0:9094"
- name: DISCORD_WEBHOOK
valueFrom:
secretKeyRef:
name: {{ .Values.discord.secret.name | quote }}
key: {{ .Values.discord.secret.key | quote }}
{{- if .Values.discord.username }}
- name: DISCORD_USERNAME
value: {{ .Values.discord.username | quote }}
{{- end }}
{{- if .Values.discord.avatar_url }}
- name: DISCORD_AVATAR_URL
value: {{ .Values.discord.avatar_url | quote }}
{{- end }}
{{- if .Values.discord.verbose }}
- name: VERBOSE
value: "ON"
{{- end }}
resources:
requests:
memory: "128Mi"
cpu: "500m"
limits:
memory: "512Mi"
cpu: "1"
---
apiVersion: v1
kind: Service
metadata:
name: alertmanager-discord
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
selector:
app: alertmanager-discord
ports:
- port: 9094
targetPort: 9094
protocol: TCP
{{- end }}
+80
@@ -0,0 +1,80 @@
kube-prometheus-stack:
alertmanager:
config:
route:
group_by: ['namespace']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- receiver: "discord_webhook"
matchers:
- severity = "critical"
continue: false
- receiver: 'null'
receivers:
- name: "null"
- name: "discord_webhook"
webhook_configs:
- url: "http://alertmanager-discord:9094"
alertmanagerSpec:
storage:
volumeClaimTemplate:
spec:
storageClassName: rook-ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
additionalPrometheusRulesMap:
rule-name:
groups:
- name: AdditionalAlerts
rules:
- alert: ExcessiveWarnings
expr: count(ALERTS{severity="warning",alertstate="firing"}) >= 5
for: 1m
labels:
severity: critical
annotations:
summary: Excessive 'warning' alerts are firing in the cluster
description: "{{ $value }} alerts with 'warning' severity are firing and could be a sign of catastrophic failure in the cluster"
prometheusOperator:
admissionWebhooks:
certManager:
enabled: true
prometheus:
prometheusSpec:
ruleSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
probeSelectorNilUsesHelmValues: false
scrapeConfigSelectorNilUsesHelmValues: false
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: rook-ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
thanosRuler:
thanosRulerSpec:
storage:
volumeClaimTemplate:
spec:
storageClassName: rook-ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 50Gi
grafana:
enabled: false # Grafana is instead deployed in its own namespace in the core-apps repo
discord:
enabled: true
secret:
name: discord-webhook
key: webhook
username: "Alertmanager"
verbose: true
+29
@@ -0,0 +1,29 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- list
- update
- watch
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
@@ -1,12 +1,12 @@
-apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
-name: prometheus-agent
+name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
-name: prometheus-agent
+name: multus
subjects:
- kind: ServiceAccount
-name: prometheus-agent
-namespace: monitoring
+name: multus
+namespace: kube-system
+20
@@ -0,0 +1,20 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-daemon-config
namespace: kube-system
labels:
tier: node
app: multus
data:
daemon-config.json: |
{
"chrootDir": "/hostroot",
"cniVersion": "0.3.1",
"logLevel": "verbose",
"logToStderr": true,
"cniConfigDir": "/host/etc/cni/net.d",
"multusAutoconfigDir": "/host/etc/cni/net.d",
"multusConfigFile": "auto",
"socketDir": "/host/run/multus/"
}
+44
@@ -0,0 +1,44 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this represen
tation of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
+132
@@ -0,0 +1,132 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
hostPID: true
tolerations:
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
serviceAccountName: multus
containers:
- name: kube-multus
image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.2.3-thick
command: [ "/usr/src/multus-cni/bin/multus-daemon" ]
resources:
requests:
cpu: "100m"
memory: "64Mi"
limits:
cpu: "200m"
memory: "256Mi"
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
# multus-daemon expects that cnibin path must be identical between pod and container host.
# e.g. if the cni bin is in '/opt/cni/bin' on the container host side, then it should be mount to '/opt/cni/bin' in multus-daemon,
# not to any other directory, like '/opt/bin' or '/usr/bin'.
- name: cnibin
mountPath: /opt/cni/bin
- name: host-run
mountPath: /host/run
- name: host-var-lib-cni-multus
mountPath: /var/lib/cni/multus
- name: host-var-lib-kubelet
mountPath: /var/lib/kubelet
mountPropagation: HostToContainer
- name: host-run-k8s-cni-cncf-io
mountPath: /run/k8s.cni.cncf.io
- name: host-run-netns
mountPath: /run/netns
mountPropagation: HostToContainer
- name: multus-daemon-config
mountPath: /etc/cni/net.d/multus.d
readOnly: true
- name: hostroot
mountPath: /hostroot
mountPropagation: HostToContainer
- mountPath: /etc/cni/multus/net.d
name: multus-conf-dir
env:
- name: MULTUS_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
initContainers:
- name: install-multus-binary
image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.2.3-thick
command:
- "/usr/src/multus-cni/bin/install_multus"
- "-d"
- "/host/opt/cni/bin"
- "-t"
- "thick"
resources:
requests:
cpu: "10m"
memory: "15Mi"
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
mountPropagation: Bidirectional
terminationGracePeriodSeconds: 10
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: hostroot
hostPath:
path: /
- name: multus-daemon-config
configMap:
name: multus-daemon-config
items:
- key: daemon-config.json
path: daemon-config.json
- name: host-run
hostPath:
path: /run
- name: host-var-lib-cni-multus
hostPath:
path: /var/lib/cni/multus
- name: host-var-lib-kubelet
hostPath:
path: /var/lib/kubelet
- name: host-run-k8s-cni-cncf-io
hostPath:
path: /run/k8s.cni.cncf.io
- name: host-run-netns
hostPath:
path: /var/run/netns/
- name: multus-conf-dir
hostPath:
path: /etc/cni/multus/net.d
+32
@@ -0,0 +1,32 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: kubevirt-nad
spec:
config: '{
"cniVersion": "0.4.0",
"type": "bridge",
"bridge": "br0",
"ipam": {
"type": "whereabouts",
"range": "10.105.20.0/24",
"exclude": [
"10.105.20.1/32",
"10.105.20.254/32",
"10.105.20.253/32",
"10.105.20.252/32",
"10.105.20.251/32",
"10.105.20.250/32",
"10.105.20.249/32"
],
"routes": [
{
"dst": "0.0.0.0/0",
"gw": "10.105.20.1"
}
],
"dns": {
"nameservers": ["10.10.10.10"]
}
}
}'
+5
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
+1 -1
@@ -21,7 +21,7 @@ spec:
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.1-20250202
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
-image: quay.io/ceph/ceph:v19.2.1
+image: quay.io/ceph/ceph:v20.2.0-20251104
# Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
# Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: rook-ceph
-version: v1.17.7
+version: v1.19.4
repository: https://charts.rook.io/release
@@ -0,0 +1,842 @@
{{- if and .Values.monitoring.enabled -}}
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: prometheus-pvc-rules
namespace: {{ .Release.Namespace }}
spec:
groups:
- name: persistent-volume-alert.rules
rules:
- alert: PersistentVolumeUsageNearFull
annotations:
description: PVC {{ "{{" }} $labels.persistentvolumeclaim {{ "}}" }} utilization has crossed 75%. Free up some space or expand the PVC.
message: PVC {{ "{{" }} $labels.persistentvolumeclaim {{ "}}" }} is nearing full. Data deletion or PVC expansion is required.
severity_level: warning
storage_type: ceph
expr: |
(kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.75
for: 5s
labels:
severity: warning
- alert: PersistentVolumeUsageCritical
annotations:
description: PVC {{ "{{" }} $labels.persistentvolumeclaim {{ "}}" }} utilization has crossed 85%. Free up some space or expand the PVC immediately.
message: PVC {{ "{{" }} $labels.persistentvolumeclaim {{ "}}" }} is critically full. Data deletion or PVC expansion is required.
severity_level: error
storage_type: ceph
expr: |
(kubelet_volume_stats_used_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) / (kubelet_volume_stats_capacity_bytes * on (namespace,persistentvolumeclaim) group_left(storageclass, provisioner) (kube_persistentvolumeclaim_info * on (storageclass) group_left(provisioner) kube_storageclass_info {provisioner=~"(.*rbd.csi.ceph.com)|(.*cephfs.csi.ceph.com)"})) > 0.85
for: 5s
labels:
severity: critical
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: prometheus-ceph-rules
namespace: {{ .Release.Namespace }}
spec:
groups:
- name: "cluster health"
rules:
- alert: "CephHealthError"
annotations:
description: "The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information."
summary: "Ceph is in the ERROR state"
expr: "ceph_health_status == 2"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.2.1"
severity: "critical"
type: "ceph_default"
- alert: "CephHealthWarning"
annotations:
description: "The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information."
summary: "Ceph is in the WARNING state"
expr: "ceph_health_status == 1"
for: "15m"
labels:
severity: "warning"
type: "ceph_default"
- name: "mon"
rules:
- alert: "CephMonDownQuorumAtRisk"
annotations:
description: "{{ "{{" }} $min := query \"floor(count(ceph_mon_metadata) / 2) + 1\" | first | value {{ "}}" }}Quorum requires a majority of monitors (x {{ "{{" }} $min {{ "}}" }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: {{ "{{" }}- range query \"(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)\" {{ "}}" }} - {{ "{{" }} .Labels.ceph_daemon {{ "}}" }} on {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down"
summary: "Monitor quorum is at risk"
expr: |
(
(ceph_health_detail{name="MON_DOWN"} == 1) * on() (
count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1)
)
) == 1
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.3.1"
severity: "critical"
type: "ceph_default"
- alert: "CephMonDown"
annotations:
description: |
{{ "{{" }} $down := query "count(ceph_mon_quorum_status == 0)" | first | value {{ "}}" }}{{ "{{" }} $s := "" {{ "}}" }}{{ "{{" }} if gt $down 1.0 {{ "}}" }}{{ "{{" }} $s = "s" {{ "}}" }}{{ "{{" }} end {{ "}}" }}You have {{ "{{" }} $down {{ "}}" }} monitor{{ "{{" }} $s {{ "}}" }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{ "{{" }}- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" {{ "}}" }} - {{ "{{" }} .Labels.ceph_daemon {{ "}}" }} on {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down"
summary: "One or more monitors down"
expr: |
count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1)
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephMonDiskspaceCritical"
annotations:
description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{ "{{" }}- range query \"ceph_mon_metadata\"{{ "}}" }} - {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit"
summary: "Filesystem space on at least one monitor is critically low"
expr: "ceph_health_detail{name=\"MON_DISK_CRIT\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.3.2"
severity: "critical"
type: "ceph_default"
- alert: "CephMonDiskspaceLow"
annotations:
description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{ "{{" }}- range query \"ceph_mon_metadata\"{{ "}}" }} - {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low"
summary: "Drive space on at least one monitor is approaching full"
expr: "ceph_health_detail{name=\"MON_DISK_LOW\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephMonClockSkew"
annotations:
description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew"
summary: "Clock skew detected among monitors"
expr: "ceph_health_detail{name=\"MON_CLOCK_SKEW\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- name: "osd"
rules:
- alert: "CephOSDDownHigh"
annotations:
description: "{{ "{{" }} $value | humanize {{ "}}" }}% or {{ "{{" }} with query \"count(ceph_osd_up == 0)\" {{ "}}" }}{{ "{{" }} . | first | value {{ "}}" }}{{ "{{" }} end {{ "}}" }} of {{ "{{" }} with query \"count(ceph_osd_up)\" {{ "}}" }}{{ "{{" }} . | first | value {{ "}}" }}{{ "{{" }} end {{ "}}" }} OSDs are down (>= 10%). The following OSDs are down: {{ "{{" }}- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" {{ "}}" }} - {{ "{{" }} .Labels.ceph_daemon {{ "}}" }} on {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}"
summary: "More than 10% of OSDs are down"
expr: "count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.1"
severity: "critical"
type: "ceph_default"
- alert: "CephOSDHostDown"
annotations:
description: "The following OSDs are down: {{ "{{" }}- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" {{ "}}" }} - {{ "{{" }} .Labels.hostname {{ "}}" }} : {{ "{{" }} .Labels.ceph_daemon {{ "}}" }} {{ "{{" }}- end {{ "}}" }}"
summary: "An OSD host is offline"
expr: "ceph_health_detail{name=\"OSD_HOST_DOWN\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.8"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDDown"
annotations:
description: |
{{ "{{" }} $num := query "count(ceph_osd_up == 0)" | first | value {{ "}}" }}{{ "{{" }} $s := "" {{ "}}" }}{{ "{{" }} if gt $num 1.0 {{ "}}" }}{{ "{{" }} $s = "s" {{ "}}" }}{{ "{{" }} end {{ "}}" }}{{ "{{" }} $num {{ "}}" }} OSD{{ "{{" }} $s {{ "}}" }} down for over 5mins. The following OSD{{ "{{" }} $s {{ "}}" }} {{ "{{" }} if eq $s "" {{ "}}" }}is{{ "{{" }} else {{ "}}" }}are{{ "{{" }} end {{ "}}" }} down: {{ "{{" }}- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"{{ "}}" }} - {{ "{{" }} .Labels.ceph_daemon {{ "}}" }} on {{ "{{" }} .Labels.hostname {{ "}}" }} {{ "{{" }}- end {{ "}}" }}
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down"
summary: "An OSD has been marked down"
expr: "ceph_health_detail{name=\"OSD_DOWN\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.2"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDNearFull"
annotations:
description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull"
summary: "OSD(s) running low on free space (NEARFULL)"
expr: "ceph_health_detail{name=\"OSD_NEARFULL\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.3"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDFull"
annotations:
description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full"
summary: "OSD full, writes blocked"
expr: "ceph_health_detail{name=\"OSD_FULL\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.6"
severity: "critical"
type: "ceph_default"
- alert: "CephOSDBackfillFull"
annotations:
description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull"
summary: "OSD(s) too full for backfill operations"
expr: "ceph_health_detail{name=\"OSD_BACKFILLFULL\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTooManyRepairs"
annotations:
description: "Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs"
summary: "OSD reports a high number of read errors"
expr: "ceph_health_detail{name=\"OSD_TOO_MANY_REPAIRS\"} == 1"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTimeoutsPublicNetwork"
annotations:
description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs."
summary: "Network issues delaying OSD heartbeats (public network)"
expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_FRONT\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTimeoutsClusterNetwork"
annotations:
description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs."
summary: "Network issues delaying OSD heartbeats (cluster network)"
expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_BACK\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDInternalDiskSizeMismatch"
annotations:
description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch"
summary: "OSD size inconsistency error"
expr: "ceph_health_detail{name=\"BLUESTORE_DISK_SIZE_MISMATCH\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDeviceFailurePredicted"
annotations:
description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info <dev id>'. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#id2"
summary: "Device(s) predicted to fail soon"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDeviceFailurePredictionTooHigh"
annotations:
description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany"
summary: "Too many devices are predicted to fail, unable to resolve"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.7"
severity: "critical"
type: "ceph_default"
- alert: "CephDeviceFailureRelocationIncomplete"
annotations:
description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use"
summary: "Device failure is predicted, but unable to relocate data"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH_IN_USE\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDFlapping"
annotations:
description: "OSD {{ "{{" }} $labels.ceph_daemon {{ "}}" }} on {{ "{{" }} $labels.hostname {{ "}}" }} was marked down and back up {{ "{{" }} $value | humanize {{ "}}" }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)."
documentation: "https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds"
summary: "Network issues are causing OSDs to flap (mark each other down)"
expr: "(rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.4"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDReadErrors"
annotations:
description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors"
summary: "Device read errors detected"
expr: "ceph_health_detail{name=\"BLUESTORE_SPURIOUS_READ_ERRORS\"} == 1"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGImbalance"
annotations:
description: "OSD {{ "{{" }} $labels.ceph_daemon {{ "}}" }} on {{ "{{" }} $labels.hostname {{ "}}" }} deviates by more than 30% from average PG count."
summary: "PGs are not balanced across OSDs"
expr: |
abs(
((ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) by (job)) /
on (job) group_left avg(ceph_osd_numpg > 0) by (job)
) * on (ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.5"
severity: "warning"
type: "ceph_default"
- name: "mds"
rules:
- alert: "CephFilesystemDamaged"
annotations:
description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages"
summary: "CephFS filesystem is damaged."
expr: "ceph_health_detail{name=\"MDS_DAMAGE\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.1"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemOffline"
annotations:
description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down"
summary: "CephFS filesystem is offline"
expr: "ceph_health_detail{name=\"MDS_ALL_DOWN\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.3"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemDegraded"
annotations:
description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded"
summary: "CephFS filesystem is degraded"
expr: "ceph_health_detail{name=\"FS_DEGRADED\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.4"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemMDSRanksLow"
annotations:
description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max"
summary: "Ceph MDS daemon count is lower than configured"
expr: "ceph_health_detail{name=\"MDS_UP_LESS_THAN_MAX\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephFilesystemInsufficientStandby"
annotations:
description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby"
summary: "Ceph filesystem standby daemons too few"
expr: "ceph_health_detail{name=\"MDS_INSUFFICIENT_STANDBY\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephFilesystemFailureNoStandby"
annotations:
description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds"
summary: "MDS daemon failed, no further standby available"
expr: "ceph_health_detail{name=\"FS_WITH_FAILED_MDS\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.5"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemReadOnly"
annotations:
description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages"
summary: "CephFS filesystem in read only mode due to write error(s)"
expr: "ceph_health_detail{name=\"MDS_HEALTH_READ_ONLY\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.2"
severity: "critical"
type: "ceph_default"
- name: "mgr"
rules:
- alert: "CephMgrModuleCrash"
annotations:
description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash"
summary: "A manager module has recently crashed"
expr: "ceph_health_detail{name=\"RECENT_MGR_MODULE_CRASH\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.6.1"
severity: "critical"
type: "ceph_default"
- alert: "CephMgrPrometheusModuleInactive"
annotations:
description: "The mgr/prometheus module at {{ "{{" }} $labels.instance {{ "}}" }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'."
summary: "The mgr/prometheus module is not available"
expr: "up{job=\"ceph\"} == 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.6.2"
severity: "critical"
type: "ceph_default"
- name: "pgs"
rules:
- alert: "CephPGsInactive"
annotations:
description: "{{ "{{" }} $value {{ "}}" }} PGs have been inactive for more than 5 minutes in pool {{ "{{" }} $labels.name {{ "}}" }}. Inactive placement groups are not able to serve read/write requests."
summary: "One or more placement groups are inactive"
expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.1"
severity: "critical"
type: "ceph_default"
- alert: "CephPGsUnclean"
annotations:
description: "{{ "{{" }} $value {{ "}}" }} PGs have been unclean for more than 15 minutes in pool {{ "{{" }} $labels.name {{ "}}" }}. Unclean PGs have not recovered from a previous failure."
summary: "One or more placement groups are marked unclean"
expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0"
for: "15m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.2"
severity: "warning"
type: "ceph_default"
- alert: "CephPGsDamaged"
annotations:
description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use the 'ceph pg repair <pg_num>' command."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged"
summary: "Placement group damaged, manual intervention needed"
expr: "ceph_health_detail{name=~\"PG_DAMAGED|OSD_SCRUB_ERRORS\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.4"
severity: "critical"
type: "ceph_default"
- alert: "CephPGRecoveryAtRisk"
annotations:
description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full"
summary: "OSDs are too full for recovery"
expr: "ceph_health_detail{name=\"PG_RECOVERY_FULL\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.5"
severity: "critical"
type: "ceph_default"
- alert: "CephPGUnavailableBlockingIO"
annotations:
description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability"
summary: "PG is unavailable, blocking I/O"
expr: "((ceph_health_detail{name=\"PG_AVAILABILITY\"} == 1) - scalar(ceph_health_detail{name=\"OSD_DOWN\"})) == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.3"
severity: "critical"
type: "ceph_default"
- alert: "CephPGBackfillAtRisk"
annotations:
description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full"
summary: "Backfill operations are blocked due to lack of free space"
expr: "ceph_health_detail{name=\"PG_BACKFILL_FULL\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.6"
severity: "critical"
type: "ceph_default"
- alert: "CephPGNotScrubbed"
annotations:
description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub <pgid>"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed"
summary: "Placement group(s) have not been scrubbed"
expr: "ceph_health_detail{name=\"PG_NOT_SCRUBBED\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGsHighPerOSD"
annotations:
description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs"
summary: "Placement groups per OSD is too high"
expr: "ceph_health_detail{name=\"TOO_MANY_PGS\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGNotDeepScrubbed"
annotations:
description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed"
summary: "Placement group(s) have not been deep scrubbed"
expr: "ceph_health_detail{name=\"PG_NOT_DEEP_SCRUBBED\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- name: "nodes"
rules:
- alert: "CephNodeRootFilesystemFull"
annotations:
description: "Root volume is dangerously full: {{ "{{" }} $value | humanize {{ "}}" }}% free."
summary: "Root filesystem is dangerously full"
expr: "node_filesystem_avail_bytes{mountpoint=\"/\"} / node_filesystem_size_bytes{mountpoint=\"/\"} * 100 < 5"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.8.1"
severity: "critical"
type: "ceph_default"
- name: "pools"
rules:
- alert: "CephPoolGrowthWarning"
annotations:
description: "Pool '{{ "{{" }} $labels.name {{ "}}" }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours."
summary: "Pool growth rate may soon exceed capacity"
expr: "(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id, instance, pod) group_right() ceph_pool_metadata) >= 95"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.9.2"
severity: "warning"
type: "ceph_default"
- alert: "CephPoolBackfillFull"
annotations:
description: "A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity."
summary: "Free space in a pool is too low for recovery/backfill"
expr: "ceph_health_detail{name=\"POOL_BACKFILLFULL\"} > 0"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPoolFull"
annotations:
description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{ "{{" }}- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" {{ "}}" }} - {{ "{{" }} .Labels.name {{ "}}" }} at {{ "{{" }} .Value {{ "}}" }}% {{ "{{" }}- end {{ "}}" }} Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full"
summary: "Pool is full - writes are blocked"
expr: "ceph_health_detail{name=\"POOL_FULL\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.9.1"
severity: "critical"
type: "ceph_default"
- alert: "CephPoolNearFull"
annotations:
description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>). Also ensure that the balancer is active."
summary: "One or more Ceph pools are nearly full"
expr: "ceph_health_detail{name=\"POOL_NEAR_FULL\"} > 0"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- name: "healthchecks"
rules:
- alert: "CephSlowOps"
annotations:
description: "{{ "{{" }} $value {{ "}}" }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
summary: "OSD operations are slow to complete"
expr: "ceph_healthcheck_slow_ops > 0"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDaemonSlowOps"
annotations:
description: "{{ "{{" }} $labels.ceph_daemon {{ "}}" }} operations are taking too long to process (complaint time exceeded)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
summary: "{{ "{{" }} $labels.ceph_daemon {{ "}}" }} operations are slow to complete"
expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- name: "hardware"
rules:
- alert: "HardwareStorageError"
annotations:
description: "Some storage devices are in error. Check `ceph health detail`."
summary: "Storage devices error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_STORAGE\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.1"
severity: "critical"
type: "ceph_default"
- alert: "HardwareMemoryError"
annotations:
description: "DIMM error(s) detected. Check `ceph health detail`."
summary: "DIMM error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_MEMORY\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.2"
severity: "critical"
type: "ceph_default"
- alert: "HardwareProcessorError"
annotations:
description: "Processor error(s) detected. Check `ceph health detail`."
summary: "Processor error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_PROCESSOR\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.3"
severity: "critical"
type: "ceph_default"
- alert: "HardwareNetworkError"
annotations:
description: "Network error(s) detected. Check `ceph health detail`."
summary: "Network error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_NETWORK\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.4"
severity: "critical"
type: "ceph_default"
- alert: "HardwarePowerError"
annotations:
description: "Power supply error(s) detected. Check `ceph health detail`."
summary: "Power supply error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_POWER\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.5"
severity: "critical"
type: "ceph_default"
- alert: "HardwareFanError"
annotations:
description: "Fan error(s) detected. Check `ceph health detail`."
summary: "Fan error(s) detected"
expr: "ceph_health_detail{name=\"HARDWARE_FANS\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.13.6"
severity: "critical"
type: "ceph_default"
- name: "PrometheusServer"
rules:
- alert: "PrometheusJobMissing"
annotations:
description: "The prometheus job that scrapes from Ceph MGR is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance."
summary: "The scrape job for Ceph MGR is missing from Prometheus"
expr: "absent(up{job=\"rook-ceph-mgr\"})"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.12.1"
severity: "critical"
type: "ceph_default"
- alert: "PrometheusJobExporterMissing"
annotations:
description: "The prometheus job that scrapes from Ceph Exporter is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance."
summary: "The scrape job for Ceph Exporter is missing from Prometheus"
expr: "sum(absent(up{job=\"rook-ceph-exporter\"})) and sum(ceph_osd_metadata{ceph_version=~\"^ceph version (1[89]|[2-9][0-9]).*\"}) > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.12.1"
severity: "critical"
type: "ceph_default"
- name: "rados"
rules:
- alert: "CephObjectMissing"
annotations:
description: "The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound"
summary: "Object(s) marked UNFOUND"
expr: "(ceph_health_detail{name=\"OBJECT_UNFOUND\"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.1"
severity: "critical"
type: "ceph_default"
- name: "generic"
rules:
- alert: "CephDaemonCrash"
annotations:
description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive <id>' command."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash"
summary: "One or more Ceph daemons have crashed, and are pending acknowledgement"
expr: "ceph_health_detail{name=\"RECENT_CRASH\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.1.2"
severity: "critical"
type: "ceph_default"
- name: "rbdmirror"
rules:
- alert: "CephRBDMirrorImagesPerDaemonHigh"
annotations:
description: "Number of image replications per daemon is not supposed to go beyond threshold 100"
summary: "Number of image replications are now above 100"
expr: "sum by (ceph_daemon, namespace) (ceph_rbd_mirror_snapshot_image_snapshots) > 100"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.2"
severity: "critical"
type: "ceph_default"
- alert: "CephRBDMirrorImagesNotInSync"
annotations:
description: "Both local and remote RBD mirror images should be in sync."
summary: "Some of the RBD mirror images are not in sync with the remote counter parts."
expr: "sum by (ceph_daemon, image, namespace, pool) (topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_local_timestamp) - topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_remote_timestamp)) != 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.3"
severity: "critical"
type: "ceph_default"
- alert: "CephRBDMirrorImagesNotInSyncVeryHigh"
annotations:
description: "More than 10% of the images have synchronization problems"
summary: "Number of unsynchronized images are very high."
expr: "count by (ceph_daemon) ((topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_local_timestamp) - topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_remote_timestamp)) != 0) > (sum by (ceph_daemon) (ceph_rbd_mirror_snapshot_snapshots)*.1)"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.4"
severity: "critical"
type: "ceph_default"
- alert: "CephRBDMirrorImageTransferBandwidthHigh"
annotations:
description: "Detected a heavy increase in bandwidth for rbd replications (over 80%) in the last 30 min. This might not be a problem, but it is good to review the number of images being replicated simultaneously"
summary: "The replication network usage has been increased over 80% in the last 30 minutes. Review the number of images being replicated. This alert will be cleaned automatically after 30 minutes"
expr: "rate(ceph_rbd_mirror_journal_replay_bytes[30m]) > 0.80"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.5"
severity: "warning"
type: "ceph_default"
- name: "nvmeof"
rules:
- alert: "NVMeoFSubsystemNamespaceLimit"
annotations:
description: "Subsystems have a max namespace limit defined at creation time. This alert means that no more namespaces can be added to {{ "{{" }} $labels.nqn {{ "}}" }}"
summary: "{{ "{{" }} $labels.nqn {{ "}}" }} subsystem has reached its maximum number of namespaces "
expr: "(count by(nqn) (ceph_nvmeof_subsystem_namespace_metadata)) >= ceph_nvmeof_subsystem_namespace_limit"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFTooManyGateways"
annotations:
description: "You may create many gateways, but 4 is the tested limit"
summary: "Max supported gateways exceeded "
expr: "count(ceph_nvmeof_gateway_info) > 4.00"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFMaxGatewayGroupSize"
annotations:
description: "You may create many gateways in a gateway group, but 2 is the tested limit"
summary: "Max gateways within a gateway group ({{ "{{" }} $labels.group {{ "}}" }}) exceeded "
expr: "count by(group) (ceph_nvmeof_gateway_info) > 2.00"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFSingleGatewayGroup"
annotations:
description: "Although a single member gateway group is valid, it should only be used for test purposes"
summary: "The gateway group {{ "{{" }} $labels.group {{ "}}" }} consists of a single gateway - HA is not possible "
expr: "count by(group) (ceph_nvmeof_gateway_info) == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFHighGatewayCPU"
annotations:
description: "Typically, high CPU may indicate degraded performance. Consider increasing the number of reactor cores"
summary: "CPU used by {{ "{{" }} $labels.instance {{ "}}" }} NVMe-oF Gateway is high "
expr: "label_replace(avg by(instance) (rate(ceph_nvmeof_reactor_seconds_total{mode=\"busy\"}[1m])),\"instance\",\"$1\",\"instance\",\"(.*):.*\") > 80.00"
for: "10m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFGatewayOpenSecurity"
annotations:
description: "It is good practice to ensure subsystems use host security to reduce the risk of unexpected data loss"
summary: "Subsystem {{ "{{" }} $labels.nqn {{ "}}" }} has been defined without host level security "
expr: "ceph_nvmeof_subsystem_metadata{allow_any_host=\"yes\"}"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFTooManySubsystems"
annotations:
description: "Although you may continue to create subsystems in {{ "{{" }} $labels.gateway_host {{ "}}" }}, the configuration may not be supported"
summary: "The number of subsystems defined to the gateway exceeds supported values "
expr: "count by(gateway_host) (label_replace(ceph_nvmeof_subsystem_metadata,\"gateway_host\",\"$1\",\"instance\",\"(.*):.*\")) > 16.00"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFVersionMismatch"
annotations:
description: "This may indicate an issue with deployment. Check cephadm logs"
summary: "The cluster has different NVMe-oF gateway releases active "
expr: "count(count by(version) (ceph_nvmeof_gateway_info)) > 1"
for: "1h"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFHighClientCount"
annotations:
description: "The supported limit for clients connecting to a subsystem is 32"
summary: "The number of clients connected to {{ "{{" }} $labels.nqn {{ "}}" }} is too high "
expr: "ceph_nvmeof_subsystem_host_count > 32.00"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFHighHostCPU"
annotations:
description: "High CPU on a gateway host can lead to CPU contention and performance degradation"
summary: "The CPU is high ({{ "{{" }} $value {{ "}}" }}%) on NVMeoF Gateway host ({{ "{{" }} $labels.host {{ "}}" }}) "
expr: "100-((100*(avg by(host) (label_replace(rate(node_cpu_seconds_total{mode=\"idle\"}[5m]),\"host\",\"$1\",\"instance\",\"(.*):.*\")) * on(host) group_right label_replace(ceph_nvmeof_gateway_info,\"host\",\"$1\",\"instance\",\"(.*):.*\")))) >= 80.00"
for: "10m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFInterfaceDown"
annotations:
description: "A NIC used by one or more subsystems is in a down state"
summary: "Network interface {{ "{{" }} $labels.device {{ "}}" }} is down "
expr: "ceph_nvmeof_subsystem_listener_iface_info{operstate=\"down\"}"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.14.1"
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFInterfaceDuplex"
annotations:
description: "Until this is resolved, performance from the gateway will be degraded"
summary: "Network interface {{ "{{" }} $labels.device {{ "}}" }} is not running in full duplex mode "
expr: "ceph_nvmeof_subsystem_listener_iface_info{duplex!=\"full\"}"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFHighReadLatency"
annotations:
description: "High latencies may indicate a constraint within the cluster e.g. CPU, network. Please investigate"
summary: "The average read latency over the last 5 mins has reached 10 ms or more on {{ "{{" }} $labels.gateway {{ "}}" }}"
expr: "label_replace((avg by(instance) ((rate(ceph_nvmeof_bdev_read_seconds_total[1m]) / rate(ceph_nvmeof_bdev_reads_completed_total[1m])))),\"gateway\",\"$1\",\"instance\",\"(.*):.*\") > 0.01"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "NVMeoFHighWriteLatency"
annotations:
description: "High latencies may indicate a constraint within the cluster e.g. CPU, network. Please investigate"
summary: "The average write latency over the last 5 mins has reached 20 ms or more on {{ "{{" }} $labels.gateway {{ "}}" }}"
expr: "label_replace((avg by(instance) ((rate(ceph_nvmeof_bdev_write_seconds_total[5m]) / rate(ceph_nvmeof_bdev_writes_completed_total[5m])))),\"gateway\",\"$1\",\"instance\",\"(.*):.*\") > 0.02"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
{{- end }}
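A note on the escaping used throughout the annotations above: sequences such as `{{ "{{" }} $labels.nqn {{ "}}" }}` are Helm escapes that emit literal braces, so the rendered PrometheusRule contains plain Prometheus template expressions. A minimal sketch of one summary before and after rendering (standard Helm behaviour, not specific to this chart):

# in the chart template
summary: "{{ "{{" }} $labels.nqn {{ "}}" }} subsystem has reached its maximum number of namespaces"
# after `helm template` renders the chart
summary: "{{ $labels.nqn }} subsystem has reached its maximum number of namespaces"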
@@ -0,0 +1,56 @@
{{- if .Values.monitoring.enabled -}}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: rook-ceph-exporter
namespace: {{ .Release.Namespace }}
spec:
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: rook-ceph-exporter
rook_cluster: {{ .Values.monitoring.cluster_name | default "rook-ceph" }}
endpoints:
- port: ceph-exporter-http-metrics
path: /metrics
interval: 10s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: rook-ceph-mgr
namespace: {{ .Release.Namespace }}
spec:
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: rook-ceph-mgr
rook_cluster: {{ .Values.monitoring.cluster_name | default "rook-ceph" }}
endpoints:
- port: http-metrics
path: /metrics
interval: 10s
honorLabels: true
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: csi-metrics
namespace: {{ .Release.Namespace }}
spec:
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: csi-metrics
endpoints:
- port: csi-http-metrics
path: /metrics
interval: 5s
{{- end }}
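These ServiceMonitors render only when monitoring is enabled in the chart values. A minimal sketch of the values consumed by the templates above (`cluster_name` is optional and falls back to `rook-ceph` via the `default` filter used above):

monitoring:
  enabled: true
  # cluster_name: rook-ceph   # optional; defaults to "rook-ceph" when unset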
+1 -660
@@ -1,661 +1,2 @@
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
# -- Image
repository: docker.io/rook/ceph
# -- Image tag
# @default -- `v1.17.8`
tag: v1.17.8
# -- Image pull policy
pullPolicy: IfNotPresent
crds:
# -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
# managed independently with deploy/examples/crds.yaml.
# **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
# If the CRDs are deleted in this case, see
# [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
# to restore them.
enabled: true
# -- Pod resource requests & limits
resources:
limits:
memory: 512Mi
requests:
cpu: 200m
memory: 128Mi
# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd
# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5
# -- Whether the operator should watch cluster CRD in its own namespace or not
currentNamespaceOnly: false
# -- Custom pod labels for the operator
operatorPodLabels: {}
# -- Pod annotations
annotations: {}
# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO
# -- If true, create & use RBAC resources
rbacEnable: true
rbacAggregate:
# -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
enableOBCs: false
# -- If true, create & use PSP resources
pspEnable: false
# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
# -- Set the container security context for the operator
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false
# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
# -- Enable Ceph CSI RBD driver
enableRbdDriver: true
# -- Enable Ceph CSI CephFS driver
enableCephfsDriver: true
# -- Disable the CSI driver.
disableCsiDriver: "false"
# -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
# in some network configurations where the SDN does not provide access to an external cluster or
# there is a significant drop in read/write performance
enableCSIHostNetwork: true
# -- Enable Snapshotter in CephFS provisioner pod
enableCephfsSnapshotter: true
# -- Enable Snapshotter in NFS provisioner pod
enableNFSSnapshotter: true
# -- Enable Snapshotter in RBD provisioner pod
enableRBDSnapshotter: true
# -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
enablePluginSelinuxHostMount: false
# -- Enable Ceph CSI PVC encryption support
enableCSIEncryption: false
# -- Enable volume group snapshot feature. This feature is
# enabled by default as long as the necessary CRDs are available in the cluster.
enableVolumeGroupSnapshot: true
# -- PriorityClassName to be set on csi driver plugin pods
pluginPriorityClassName: system-node-critical
# -- PriorityClassName to be set on csi driver provisioner pods
provisionerPriorityClassName: system-cluster-critical
# -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
rbdFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
cephFSFSGroupPolicy: "File"
# -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
# supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
nfsFSGroupPolicy: "File"
# -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
# which helps CSI identify the RBD images for CSI operations.
# `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the RBD mirroring feature.
# By default the OMAP generator is disabled; when enabled, it is deployed as a
# sidecar with the CSI provisioner pod. Set this to true to enable it.
enableOMAPGenerator: false
# -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
# Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
cephFSKernelMountOptions:
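# For example, when connections.encrypted is enabled in the CephCluster CR (commented example only):
# cephFSKernelMountOptions: ms_mode=secure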
# -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
# Not all users are interested in volume/snapshot details as metadata on CephFS subvolumes and RBD images,
# so enableMetadata is false by default.
enableMetadata: false
# -- Set replicas for csi provisioner deployment
provisionerReplicas: 2
# -- Cluster name identifier to set as metadata on the CephFS subvolumes and RBD images. This is useful
# when, for example, two container orchestrator clusters (Kubernetes/OCP) share a single Ceph cluster.
clusterName:
# -- Set logging level for cephCSI containers maintained by the cephCSI.
# Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
logLevel: 0
# -- Set logging level for Kubernetes-csi sidecar containers.
# Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
# @default -- `0`
sidecarLogLevel:
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
# -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
rbdPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
# @default -- `1`
rbdPluginUpdateStrategyMaxUnavailable:
# -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
cephFSPluginUpdateStrategy:
# -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
# @default -- `1`
cephFSPluginUpdateStrategyMaxUnavailable:
# -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
# @default -- `RollingUpdate`
nfsPluginUpdateStrategy:
# -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
grpcTimeoutInSeconds: 150
# -- Burst to use while communicating with the kubernetes apiserver.
kubeApiBurst:
# -- QPS to use while communicating with the kubernetes apiserver.
kubeApiQPS:
# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- The volume of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolume:
# - name: lib-modules
# hostPath:
# path: /run/booted-system/kernel-modules/lib/modules/
# - name: host-nix
# hostPath:
# path: /nix
# -- The volume mounts of the CephCSI CephFS plugin DaemonSet
csiCephFSPluginVolumeMount:
# - name: host-nix
# mountPath: /nix
# readOnly: true
# -- CEPH CSI RBD provisioner resource requirement list
# csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
# @default -- see values.yaml
csiRBDProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
limits:
memory: 1Gi
- name : csi-omap-generator
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI RBD plugin resource requirement list
# @default -- see values.yaml
csiRBDPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-rbdplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS provisioner resource requirement list
# @default -- see values.yaml
csiCephFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-resizer
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-attacher
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-snapshotter
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI CephFS plugin resource requirement list
# @default -- see values.yaml
csiCephFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-cephfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : liveness-prometheus
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
# -- CEPH CSI NFS provisioner resource requirement list
# @default -- see values.yaml
csiNFSProvisionerResource: |
- name : csi-provisioner
resource:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
- name : csi-attacher
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# -- CEPH CSI NFS plugin resource requirement list
# @default -- see values.yaml
csiNFSPluginResource: |
- name : driver-registrar
resource:
requests:
memory: 128Mi
cpu: 50m
limits:
memory: 256Mi
- name : csi-nfsplugin
resource:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
# Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
# The CSI provisioner is best started on the same nodes as the other Ceph daemons.
# -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
provisionerTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CSI provisioner deployment [^1]
provisionerNodeAffinity: #key1=value1,value2; key2=value3
# Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
# The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
# -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
pluginTolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
pluginNodeAffinity: # key1=value1,value2; key2=value3
# -- Enable Ceph CSI Liveness sidecar deployment
enableLiveness: false
# -- CSI CephFS driver metrics port
# @default -- `9081`
cephfsLivenessMetricsPort:
# -- CSI Addons server port
# @default -- `9070`
csiAddonsPort:
# -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
# you may want to disable this setting. However, this will cause an issue during upgrades
# with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
forceCephFSKernelClient: true
# -- Ceph CSI RBD driver metrics port
# @default -- `8080`
rbdLivenessMetricsPort:
serviceMonitor:
# -- Enable ServiceMonitor for Ceph CSI drivers
enabled: false
# -- Service monitor scrape interval
interval: 10s
# -- ServiceMonitor additional labels
labels: {}
# -- Use a different namespace for the ServiceMonitor
namespace:
# -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
# @default -- `/var/lib/kubelet`
kubeletDirPath:
# -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
# @default -- `137s`
csiLeaderElectionLeaseDuration:
# -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
# @default -- `107s`
csiLeaderElectionRenewDeadline:
# -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
# @default -- `26s`
csiLeaderElectionRetryPeriod:
cephcsi:
# -- Ceph CSI image repository
repository: quay.io/cephcsi/cephcsi
# -- Ceph CSI image tag
tag: v3.13.0
registrar:
# -- Kubernetes CSI registrar image repository
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
# -- Registrar image tag
tag: v2.13.0
provisioner:
# -- Kubernetes CSI provisioner image repository
repository: registry.k8s.io/sig-storage/csi-provisioner
# -- Provisioner image tag
tag: v5.1.0
snapshotter:
# -- Kubernetes CSI snapshotter image repository
repository: registry.k8s.io/sig-storage/csi-snapshotter
# -- Snapshotter image tag
tag: v8.2.0
attacher:
# -- Kubernetes CSI Attacher image repository
repository: registry.k8s.io/sig-storage/csi-attacher
# -- Attacher image tag
tag: v4.8.0
resizer:
# -- Kubernetes CSI resizer image repository
repository: registry.k8s.io/sig-storage/csi-resizer
# -- Resizer image tag
tag: v1.13.1
# -- Image pull policy
imagePullPolicy: IfNotPresent
# -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
cephfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
nfsPodLabels: #"key1=value1,key2=value2"
# -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
rbdPodLabels: #"key1=value1,key2=value2"
csiAddons:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons sidecar image repository
repository: quay.io/csiaddons/k8s-sidecar
# -- CSIAddons sidecar image tag
tag: v0.11.0
nfs:
# -- Enable the nfs csi driver
enabled: false
topology:
# -- Enable topology based provisioning
enabled: false
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
# -- domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
domainLabels:
# - kubernetes.io/hostname
# - topology.kubernetes.io/zone
# - topology.rook.io/rack
# -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
# CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
cephFSAttachRequired: true
# -- Whether to skip any attach operation altogether for RBD PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
# **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
# csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
# to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
# Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
rbdAttachRequired: true
# -- Whether to skip any attach operation altogether for NFS PVCs. See more details
# [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
# If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
# of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
# NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
nfsAttachRequired: true
# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (default to 60m)
discoveryDaemonInterval: 60m
# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"
# -- If true, run rook operator on the host network
useOperatorHostNetwork:
# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
# -- Toleration for the discover pods.
# Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
toleration:
# -- The specific key of the taint to tolerate
tolerationKey:
# -- Array of tolerations in YAML format which will be added to discover deployment
tolerations:
# - key: key
# operator: Exists
# effect: NoSchedule
# -- The node labels for affinity of `discover-agent` [^1]
nodeAffinity:
# key1=value1,value2; key2=value3
#
# or
#
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: storage-node
# operator: Exists
# -- Labels to add to the discover pods
podLabels: # "key1=value1,key2=value2"
# -- Add resources to discover daemon pods
resources:
# - limits:
# memory: 512Mi
# - requests:
# cpu: 100m
# memory: 128Mi
# -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used
customHostnameLabel:
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
# -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled
enforceHostNetwork: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
revisionHistoryLimit:
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# -- The imagePullSecrets option allows pulling Docker images from a private registry. It is passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret
# -- Whether the OBC provisioner should watch the operator namespace; if not, the namespace of the cluster is used
enableOBCWatchOperatorNamespace: true
# -- Specify the prefix for the OBC provisioner in place of the cluster namespace
# @default -- `ceph cluster namespace`
obcProvisionerNamePrefix:
monitoring:
# -- Enable monitoring. Requires Prometheus to be pre-installed.
# Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
enabled: false
enabled: true
@@ -10,7 +10,10 @@ parameters:
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: cert-dubyatp-xyz
annotations:
replicator.v1.mittwald.de/replicate-from: cert-manager/cert-dubyatp-xyz
replicator.v1.mittwald.de/replicated-keys: tls.crt,tls.key
type: Opaque
stringData:
tls.crt: ""
tls.key: ""
@@ -0,0 +1,32 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: weyma-s3-ingress
spec:
rules:
- host: "weyma-s3.infra.dubyatp.xyz"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: rook-ceph-rgw-weyma-s3
port:
number: 80
- host: "*.weyma-s3.infra.dubyatp.xyz"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: rook-ceph-rgw-weyma-s3
port:
number: 80
tls:
- secretName: cert-dubyatp-xyz
hosts:
- weyma-s3.infra.dubyatp.xyz
- "*.weyma-s3.infra.dubyatp.xyz"
@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: weyma-s3-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
parameters:
objectStoreName: weyma-s3
objectStoreNamespace: rook-ceph
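Buckets in this object store are typically requested through an ObjectBucketClaim referencing the StorageClass above. A minimal sketch (the claim and bucket names are hypothetical; only `storageClassName` comes from this file):

apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: example-bucket                 # hypothetical claim name
spec:
  generateBucketName: example-bucket   # hypothetical bucket name prefix
  storageClassName: weyma-s3-bucket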
@@ -0,0 +1,27 @@
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
name: weyma-s3
namespace: rook-ceph
spec:
dataPool:
application: ""
failureDomain: host
replicated:
size: 3
gateway:
instances: 3
port: 80
metadataPool:
application: ""
erasureCoded:
codingChunks: 0
dataChunks: 0
failureDomain: host
replicated:
size: 3
preservePoolsOnDelete: true
sharedPools:
preserveRadosNamespaceDataOnDelete: false
zone:
name: ""
@@ -28,3 +28,5 @@ parameters:
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
allowVolumeExpansion: true
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: traefik
version: 37.0.0
version: 39.0.8
repository: https://traefik.github.io/charts
+51 -4
@@ -4,6 +4,7 @@ traefik:
- --entryPoints.websecure.transport.respondingTimeouts.readTimeout=0
ports:
web:
http:
redirections:
entryPoint:
to: websecure
@@ -14,19 +15,29 @@ traefik:
exposedPort: 22
expose:
default: true
tls:
passthrough: true
metrics:
prometheus:
service:
enabled: true
serviceMonitor:
enabled: true
prometheusRule:
enabled: true
rules:
- alert: TraefikDown
expr: up{job="traefik"} == 0
for: 5m
labels:
metrics_enabled: "true"
context: traefik
severity: critical
annotations:
summary: "Traefik Down"
description: "{{ $labels.pod }} on {{ $labels.nodename }} is down"
deployment:
kind: DaemonSet
additionalContainers:
- name: cloudflared
image: cloudflare/cloudflared:2025.8.0
image: cloudflare/cloudflared:2026.3.0
command:
- cloudflared
- tunnel
@@ -69,6 +80,19 @@ traefik:
traefik-real-ip:
excludednets:
- "1.1.1.1/24"
routers:
dispatcharr:
entryPoints:
- websecure
service: dispatcharr
tls:
options: default
rule: 'Host(`dispatcharr.dubyatp.xyz`) && PathPrefix(`/`)'
services:
dispatcharr:
loadBalancer:
servers:
- url: http://10.105.15.20:9191
service:
spec:
externalTrafficPolicy: Local
@@ -105,3 +129,26 @@ traefik:
data:
tls.crt: ""
tls.key: ""
- apiVersion: v1
kind: Service
metadata:
name: traefik-local
spec:
sessionAffinity: ClientIP
sessionAffinityConfig:
clientIP:
timeoutSeconds: 3600
selector:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
ports:
- name: gitssh
port: 22
targetPort: gitssh
- name: web
port: 80
targetPort: web
- name: websecure
port: 443
targetPort: websecure
type: ClusterIP
+1 -1
@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: velero
version: 10.1.0
version: 12.0.0
repository: https://vmware-tanzu.github.io/helm-charts
+45 -2
@@ -2,8 +2,48 @@ velero:
backupsEnabled: true
snapshotsEnabled: false
metrics:
serviceMonitor:
enabled: true
prometheusRule:
enabled: true
spec:
- alert: VeleroBackupFailed
annotations:
message: Velero backup {{ $labels.schedule }} has failed
expr: |-
velero_backup_last_status{schedule!=""} != 1
for: 15m
labels:
metrics_enabled: "true"
severity: warning
- alert: VeleroBackupFailing
annotations:
message: Velero backup {{ $labels.schedule }} has been failing for the last 12h
expr: |-
velero_backup_last_status{schedule!=""} != 1
for: 12h
labels:
severity: critical
- alert: VeleroNoNewBackup
annotations:
message: Velero backup {{ $labels.schedule }} has not run successfully in the last 25h
expr: |-
(
(time() - velero_backup_last_successful_timestamp{schedule!=""}) >bool (25 * 3600)
or
absent(velero_backup_last_successful_timestamp{schedule!=""})
) == 1
for: 1h
labels:
severity: critical
- alert: VeleroBackupPartialFailures
annotations:
message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} partially failed backups
expr: |-
rate(velero_backup_partial_failure_total{schedule!=""}[25m])
/ rate(velero_backup_attempt_total{schedule!=""}[25m]) > 0.5
for: 15m
labels:
severity: warning
configuration:
backupStorageLocation:
- name: weyma-truenas
@@ -19,7 +59,7 @@ velero:
insecureSkipTLSVerify: "true"
initContainers:
- name: velero-plugin-for-aws
image: velero/velero-plugin-for-aws:v1.12.2
image: velero/velero-plugin-for-aws:v1.14.0
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /target
@@ -32,6 +72,9 @@ velero:
velero.io/change-storage-class: RestoreItemAction
data:
ceph-block: weyma-shared
kubectl:
image:
tag: "1.33.4"
extraObjects:
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
+10
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: whereabouts-config
namespace: kube-system
annotations:
kubernetes.io/description: |
Configmap containing user customizable cronjob schedule
data:
cron-expression: "30 4 * * *" # Default schedule is once per day at 4:30am. Users may configure this value to their liking.
+70
@@ -0,0 +1,70 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: ippools.whereabouts.cni.cncf.io
spec:
group: whereabouts.cni.cncf.io
names:
kind: IPPool
listKind: IPPoolList
plural: ippools
singular: ippool
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: IPPool is the Schema for the ippools API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: IPPoolSpec defines the desired state of IPPool
properties:
allocations:
additionalProperties:
description: IPAllocation represents metadata about the pod/container
owner of a specific IP
properties:
id:
type: string
ifname:
type: string
podref:
type: string
required:
- id
- podref
type: object
description: |-
Allocations is the set of allocated IPs for the given range. Its indices are a direct mapping to the
IP with the same index/offset for the pool's range.
type: object
range:
description: Range is a RFC 4632/4291-style string that represents
an IP address and prefix length in CIDR notation
type: string
required:
- allocations
- range
type: object
type: object
served: true
storage: true
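For reference, an IPPool object matching this schema looks roughly like the sketch below. Whereabouts normally creates and manages these itself; the range, offset, and pod reference shown are hypothetical:

apiVersion: whereabouts.cni.cncf.io/v1alpha1
kind: IPPool
metadata:
  name: 10.129.0.0-24          # hypothetical; derived from the range
  namespace: kube-system
spec:
  range: 10.129.0.0/24         # CIDR notation, per the schema above
  allocations:
    "5":                       # offset into the range, i.e. 10.129.0.5
      id: "example-container-id"
      podref: default/example-pod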
@@ -0,0 +1,56 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: overlappingrangeipreservations.whereabouts.cni.cncf.io
spec:
group: whereabouts.cni.cncf.io
names:
kind: OverlappingRangeIPReservation
listKind: OverlappingRangeIPReservationList
plural: overlappingrangeipreservations
singular: overlappingrangeipreservation
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: OverlappingRangeIPReservation is the Schema for the OverlappingRangeIPReservations
API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OverlappingRangeIPReservationSpec defines the desired state
of OverlappingRangeIPReservation
properties:
containerid:
type: string
ifname:
type: string
podref:
type: string
required:
- podref
type: object
required:
- spec
type: object
served: true
storage: true
+76
View File
@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: whereabouts
namespace: kube-system
labels:
tier: node
app: whereabouts
spec:
selector:
matchLabels:
name: whereabouts
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: whereabouts
name: whereabouts
spec:
hostNetwork: true
serviceAccountName: whereabouts
tolerations:
- operator: Exists
effect: NoSchedule
containers:
- name: whereabouts
command: [ "/bin/sh" ]
args:
- -c
- |
SLEEP=false source /install-cni.sh
/token-watcher.sh &
/ip-control-loop -log-level debug
image: ghcr.io/k8snetworkplumbingwg/whereabouts:v0.9.2
env:
- name: NODENAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: WHEREABOUTS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: "300m"
memory: "200Mi"
securityContext:
privileged: true
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
- name: cni-net-dir
mountPath: /host/etc/cni/net.d
- name: cron-scheduler-configmap
mountPath: /cron-schedule
volumes:
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: cron-scheduler-configmap
configMap:
name: "whereabouts-config"
defaultMode: 0744
items:
- key: "cron-expression"
path: "config"
+76
@@ -0,0 +1,76 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: whereabouts
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: whereabouts
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: whereabouts-cni
subjects:
- kind: ServiceAccount
name: whereabouts
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: whereabouts-cni
rules:
- apiGroups:
- whereabouts.cni.cncf.io
resources:
- ippools
- overlappingrangeipreservations
- nodeslicepools
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
- apiGroups: [""]
resources:
- pods
verbs:
- list
- watch
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- network-attachment-definitions
verbs:
- get
- list
- watch
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
- get