Compare commits


238 Commits

Author SHA1 Message Date
579b4077d2 yt-dlp-bot: deploy update to 1ef217f 2025-11-29 00:44:39 +00:00
1946931937 dispatcharr: fix transcoding 2025-11-28 10:39:12 -05:00
856ee307ff dispatcharr: add hw transcoding 2025-11-27 20:39:09 -05:00
f4da25c243 jellyfin: block public access to metrics 2025-11-26 10:41:21 -05:00
19f8dd922b jellyfin: enable monitoring 2025-11-26 09:58:00 -05:00
2e26a5df9e gitea: metrics should be under gitea.gitea 2025-11-26 09:53:29 -05:00
5ffaa2d65b gitea: enable metrics 2025-11-26 09:52:24 -05:00
b2d4211583 gitea: attempt to increase valkey-cluster limits 2025-11-24 15:02:51 -05:00
b64ddb7b71 Merge pull request 'chore(deps): update helm release grafana to v10.2.0' (#61) from renovate/grafana-10.x into main
Reviewed-on: #61
2025-11-21 18:42:43 +00:00
447c4b618b grafana: re-enable 2025-11-21 13:22:16 -05:00
2a5057dc5b chore(deps): update helm release grafana to v10.2.0 2025-11-21 13:00:09 +00:00
a58d4dc19f grafana: take offline temporarily for prometheus migrations 2025-11-20 08:59:19 -05:00
482ddda271 authentik: rm redis, no longer a dependency 2025-11-19 15:03:34 -05:00
e401a71a01 Merge pull request 'chore(deps): update helm release authentik to v2025.10.2' (#59) from renovate/authentik-2025.x into main
Reviewed-on: #59
2025-11-19 19:58:37 +00:00
c1053309e8 Merge pull request 'chore(deps): update helm release grafana to v10.1.5' (#60) from renovate/grafana-10.x into main
Reviewed-on: #60
2025-11-19 19:54:19 +00:00
6666fd467f chore(deps): update helm release grafana to v10.1.5 2025-11-19 17:00:13 +00:00
3a2e9e0e9d chore(deps): update helm release authentik to v2025.10.2 2025-11-19 14:00:10 +00:00
d17fce4dcc update postgres url for gitea, netmaker, and peertube 2025-11-13 21:16:49 -05:00
a514eed9cf authentik: once again try to use pooler url 2025-11-13 21:06:55 -05:00
d747936738 Revert "authentik: migrate to pgsql pooler url"
This reverts commit 7c65d7a917.
2025-11-13 16:31:54 -05:00
7c65d7a917 authentik: migrate to pgsql pooler url 2025-11-13 15:45:50 -05:00
7d456d5bd8 yt-dlp-bot: deploy update to db488e0 2025-11-12 03:32:49 +00:00
63fdcf4b41 yt-dlp-bot: deploy update to e124ea3 2025-11-12 03:24:57 +00:00
b7b33c9812 yt-dlp-bot: deploy update to 2f47bbd 2025-11-12 03:15:02 +00:00
bb4ac79006 yt-dlp-bot: deploy update to 709ccdd 2025-11-12 02:51:52 +00:00
908ec8cb85 Merge pull request 'chore(deps): update docker.io/bats/bats docker tag to v1.13.0' (#56) from renovate/docker.io-bats-bats-1.x into main
Reviewed-on: #56
2025-11-08 15:51:11 +00:00
894d2d90bb chore(deps): update docker.io/bats/bats docker tag to v1.13.0 2025-11-07 17:00:10 +00:00
9d58d47a98 Merge pull request 'chore(deps): update helm release jellyfin to v2.5.0' (#54) from renovate/jellyfin-2.x into main
Reviewed-on: #54
2025-11-04 00:46:04 +00:00
0fc227c978 Merge pull request 'chore(deps): update helm release authentik to v2025.10.1' (#55) from renovate/authentik-2025.x into main
Reviewed-on: #55
2025-11-04 00:45:39 +00:00
5d642abfb2 chore(deps): update helm release authentik to v2025.10.1 2025-11-03 18:00:18 +00:00
06ce0d7020 chore(deps): update helm release jellyfin to v2.5.0 2025-11-03 04:00:12 +00:00
d1a1b90de4 Merge pull request 'chore(deps): update helm release grafana to v10.1.4' (#53) from renovate/grafana-10.x into main
Reviewed-on: #53
2025-10-30 01:20:23 +00:00
34e18748d2 chore(deps): update helm release grafana to v10.1.4 2025-10-29 23:00:13 +00:00
57dfd393bf Merge pull request 'chore(deps): update helm release authentik to v2025.10.0' (#52) from renovate/authentik-2025.x into main
Reviewed-on: #52
2025-10-28 17:46:08 +00:00
ee3f5d2d7e chore(deps): update helm release authentik to v2025.10.0 2025-10-27 21:00:08 +00:00
d1ad8fc440 netmaker: fix ui 2025-10-26 22:28:41 -04:00
821cf8f067 yt-dlp-bot: deploy update to 8eb6bf3 2025-10-26 18:17:58 +00:00
a38752d7e6 netmaker: prod config 2025-10-26 14:13:40 -04:00
65d8ddebb1 netmaker: add API and UI 2025-10-23 19:38:35 -04:00
8c878150d9 Merge remote-tracking branch 'refs/remotes/origin/main' 2025-10-22 22:02:22 -04:00
e6c0474bbb netmaker stub 2025-10-22 22:02:07 -04:00
d0fdc8c932 yt-dlp-bot: deploy update to 0c21a85 2025-10-22 23:07:02 +00:00
cb0cbcf1fb yt-dlp-bot: deploy update to d29c045 2025-10-22 21:35:51 +00:00
c4c25a647b yt-dlp-bot: deploy update to c2c228d 2025-10-22 15:04:21 +00:00
27e477af6f renovate: add ssh signing 2025-10-22 10:34:57 -04:00
9b53acf467 Merge pull request 'chore(deps): update helm release jellyfin to v2.4.0' (#51) from renovate/jellyfin-2.x into main
Reviewed-on: #51
2025-10-20 13:09:38 +00:00
8882f1a3d7 chore(deps): update helm release jellyfin to v2.4.0 2025-10-20 04:00:10 +00:00
12dfdc80ec Merge pull request 'chore(deps): update helm release grafana to v10.1.2' (#50) from renovate/grafana-10.x into main
Reviewed-on: #50
2025-10-18 14:54:16 +00:00
10f79e298e chore(deps): update helm release grafana to v10.1.2 2025-10-18 13:00:17 +00:00
fad154cf7d Merge pull request 'chore(deps): update helm release grafana to v10.1.1' (#49) from renovate/grafana-10.x into main
Reviewed-on: #49
2025-10-15 17:24:09 +00:00
56e71cb9e2 chore(deps): update helm release grafana to v10.1.1 2025-10-15 16:00:09 +00:00
f8f80a393e yt-dlp-bot: deploy update to cef71e1 2025-10-15 02:18:52 +00:00
604323e615 gitea: enable valkey cluster from chart 2025-10-12 20:10:48 -04:00
5049bd2f38 Merge pull request 'chore(deps): update helm release grafana to v10.1.0' (#48) from renovate/grafana-10.x into main
Reviewed-on: #48
2025-10-12 04:35:22 +00:00
cb9ec59e78 chore(deps): update helm release grafana to v10.1.0 2025-10-10 16:00:18 +00:00
b3b7521f99 yt-dlp-bot: deploy update to 7f1182f 2025-10-09 12:52:00 +00:00
d3d5b1eba6 yt-dlp-bot: deploy update to 0cf1b4a 2025-10-08 13:21:39 +00:00
47d716d393 yt-dlp-bot: deploy update to 0821359 2025-10-08 13:19:54 +00:00
f8ee4b70ae yt-dlp-bot: deploy update to 7c3905e 2025-10-08 13:08:23 +00:00
05150b8d4b yt-dlp-bot: deploy update to 2d9241b 2025-10-08 13:05:09 +00:00
983de12bf9 rm wekan 2025-10-06 20:51:18 -04:00
d817b492bf Merge pull request 'chore(deps): update helm release gitea to v12.4.0' (#46) from renovate/gitea-12.x into main
Reviewed-on: #46
2025-10-06 16:48:17 +00:00
3bda4aeead chore(deps): update helm release gitea to v12.4.0 2025-10-06 15:00:16 +00:00
d81fbc5266 Merge pull request 'chore(deps): update helm release authentik to v2025.8.4' (#45) from renovate/authentik-2025.x into main
Reviewed-on: #45
2025-09-30 15:14:06 +00:00
6df1bae2a2 chore(deps): update helm release authentik to v2025.8.4 2025-09-30 11:00:09 +00:00
e819c0a19e yt-dlp-bot: deploy update to 236ef0d 2025-09-29 16:52:02 +00:00
449a5884c1 add arr-stack and jellyfin to velero backups 2025-09-29 08:46:53 -04:00
f7950a6632 yt-dlp-bot: deploy update to f7cb27b 2025-09-28 04:16:28 +00:00
7b68fb1578 yt-dlp-bot: deploy update to feb99c0 2025-09-26 19:50:42 +00:00
2b2d1766af jellyfin: increase config PVC to 25Gi 2025-09-25 17:32:50 -04:00
13ea9a56dc yt-dlp-bot: deploy update to d06af69 2025-09-23 18:48:03 +00:00
0e4c3a219a add kubernetes manifests to renovate 2025-09-19 22:12:19 -04:00
c5e7f62f29 add sonarr 2025-09-18 22:57:12 -04:00
ef5cb3b27b rm ingresses to make way for authentik to handle ingress 2025-09-18 22:57:12 -04:00
782bbcc568 Merge pull request 'chore(deps): update helm release wekan to v7.95.0' (#44) from renovate/wekan-7.x into main
Reviewed-on: #44
2025-09-19 00:00:12 +00:00
37edf93b21 add flaresolvarr, prowlarr 2025-09-18 18:53:33 -04:00
692e8fd9b6 add arr-stack 2025-09-18 17:41:47 -04:00
543a85c4fb chore(deps): update helm release wekan to v7.95.0 2025-09-18 20:00:09 +00:00
49c5f5d8c9 rm emby 2025-09-18 10:07:13 -04:00
453fadfd2c Merge pull request 'chore(deps): update helm release grafana to v10' (#43) from renovate/grafana-10.x into main
Reviewed-on: #43
2025-09-17 14:41:58 +00:00
09fbc4e474 chore(deps): update helm release grafana to v10 2025-09-16 20:00:09 +00:00
9f953e79ca authentik: i forgor to add smtp username 2025-09-16 14:14:01 -04:00
98628a2113 authentik: use default port 25 for smtp 2025-09-16 14:05:56 -04:00
2fc19ce70d authentik: extend smtp timeout 2025-09-16 14:00:36 -04:00
bbe1d21048 authentik: setup smtp 2025-09-16 13:53:12 -04:00
4253a6845e Merge pull request 'chore(deps): update helm release authentik to v2025.8.3' (#42) from renovate/authentik-2025.x into main
Reviewed-on: #42
2025-09-16 17:33:53 +00:00
15a82b2ebb chore(deps): update helm release authentik to v2025.8.3 2025-09-16 17:00:10 +00:00
aa98845e6a Merge pull request 'chore(deps): update helm release gitea to v12.3.0' (#41) from renovate/gitea-12.x into main
Reviewed-on: #41
2025-09-15 22:13:02 +00:00
fd5879f4ba Merge pull request 'chore(deps): update helm release authentik to v2025.8.2' (#40) from renovate/authentik-2025.x into main
Reviewed-on: #40
2025-09-15 22:12:43 +00:00
2051d563f8 chore(deps): update helm release gitea to v12.3.0 2025-09-15 17:00:14 +00:00
d4e4cb1313 chore(deps): update helm release authentik to v2025.8.2 2025-09-15 17:00:09 +00:00
ae13754c27 rm emby ingress 2025-09-15 12:52:13 -04:00
fcc65c33fa add jellyfin 2025-09-15 12:51:47 -04:00
83a337f768 yt-dlp-bot: deploy update to 44b59e3 2025-09-13 04:30:45 +00:00
e42943d0d4 Merge pull request 'chore(deps): update helm release grafana to v9.4.5' (#39) from renovate/grafana-9.x into main
Reviewed-on: #39
2025-09-11 21:40:23 +00:00
9a43144600 chore(deps): update helm release grafana to v9.4.5 2025-09-11 14:00:08 +00:00
d0386c0204 yt-dlp-bot: deploy update to d478896 2025-09-06 02:34:47 +00:00
8e53b08ed6 add dispatcharr 2025-09-03 18:40:20 -04:00
f9126499c2 xmltv: add OTA 2025-09-03 18:36:20 -04:00
73e36385f8 add zap2xml 2025-09-03 14:39:55 -04:00
4406b80872 Merge pull request 'chore(deps): update helm release grafana to v9.4.4' (#38) from renovate/grafana-9.x into main
Reviewed-on: #38
2025-09-01 14:20:44 +00:00
8ca7676f61 chore(deps): update helm release grafana to v9.4.4 2025-09-01 08:00:08 +00:00
0c6be07c25 yt-dlp-bot: deploy update to 5c5cba7 2025-08-31 21:02:05 +00:00
ebb7443524 Merge pull request 'chore(deps): update helm release grafana to v9.4.3' (#37) from renovate/grafana-9.x into main
Reviewed-on: #37
2025-08-31 20:58:03 +00:00
150a50a5a9 chore(deps): update helm release grafana to v9.4.3 2025-08-29 21:00:09 +00:00
790929ce1e yt-dlp-bot: deploy update to df8c7e9 2025-08-26 20:44:08 +00:00
1e6abc98f2 Merge pull request 'chore(deps): update helm release grafana to v9.3.6' (#36) from renovate/grafana-9.x into main
Reviewed-on: #36
2025-08-26 18:30:40 +00:00
4fd6184c44 chore(deps): update helm release grafana to v9.3.6 2025-08-26 17:00:08 +00:00
fea8e75871 add peertube 2025-08-25 22:20:07 -04:00
51ac82eef0 Merge pull request 'chore(deps): update helm release grafana to v9.3.5' (#35) from renovate/grafana-9.x into main
Reviewed-on: #35
2025-08-25 22:59:04 +00:00
5dc6775ad9 chore(deps): update helm release grafana to v9.3.5 2025-08-25 19:00:09 +00:00
fe22b2f897 yt-dlp-bot: deploy update to 468603f 2025-08-24 15:21:45 +00:00
e9c71bd41c yt-dlp-bot: deploy update to a8a24ef 2025-08-23 21:00:15 +00:00
2efaf2f7b9 yt-dlp-bot: deploy update to db2bff7 2025-08-23 00:08:22 +00:00
dc3060a35f gitea-runner: use proper deployment manifest 2025-08-22 19:33:56 -04:00
51eeb300db gitea-runner: require images to be pulled always 2025-08-22 19:31:25 -04:00
286d505398 Merge pull request 'chore(deps): update helm release grafana to v9.3.4' (#34) from renovate/grafana-9.x into main
Reviewed-on: #34
2025-08-22 17:32:35 +00:00
fe4dbc8746 Merge pull request 'chore(deps): update helm release authentik to v2025.8.1' (#33) from renovate/authentik-2025.x into main
Reviewed-on: #33
2025-08-22 17:32:18 +00:00
8eb5729e72 chore(deps): update helm release grafana to v9.3.4 2025-08-22 17:00:11 +00:00
2fe858fc2c chore(deps): update helm release authentik to v2025.8.1 2025-08-22 16:00:08 +00:00
0aa5e8e74c Merge pull request 'chore(deps): update helm release grafana to v9.3.3' (#32) from renovate/grafana-9.x into main
Reviewed-on: #32
2025-08-22 11:16:29 +00:00
08e852729c chore(deps): update helm release grafana to v9.3.3 2025-08-21 18:00:08 +00:00
13b385a3b4 Merge pull request 'chore(deps): update helm release authentik to v2025.8.0' (#31) from renovate/authentik-2025.x into main
Reviewed-on: #31
2025-08-21 15:04:29 +00:00
21d09fa020 chore(deps): update helm release authentik to v2025.8.0 2025-08-20 19:00:09 +00:00
131aad1403 yt-dlp-bot: deploy update to 963e1ef 2025-08-20 11:02:51 +00:00
d77493fc4d Merge pull request 'chore(deps): update helm release gitea to v12.2.0' (#30) from renovate/gitea-12.x into main
Reviewed-on: #30
2025-08-19 22:41:32 +00:00
54e009efea chore(deps): update helm release gitea to v12.2.0 2025-08-19 16:00:08 +00:00
a66540b327 yt-dlp-bot: deploy update to 96a35b6 2025-08-18 23:31:19 +00:00
1541d40407 Merge pull request 'chore(deps): update helm release wekan to v7.94.0' (#29) from renovate/wekan-7.x into main
Reviewed-on: #29
2025-08-18 23:28:59 +00:00
35a4335800 chore(deps): update helm release wekan to v7.94.0 2025-08-18 17:00:10 +00:00
bcf06b2ba0 yt-dlp-bot: deploy update to 6009693 2025-08-16 16:12:52 +00:00
0507262ff4 Merge pull request 'chore(deps): update helm release grafana to v9.3.2' (#28) from renovate/grafana-9.x into main
Reviewed-on: #28
2025-08-13 18:12:56 +00:00
1c3a6d7d0a chore(deps): update helm release grafana to v9.3.2 2025-08-12 09:00:08 +00:00
cc64d87999 yt-dlp-bot: deploy update to 1a61e23 2025-08-11 08:11:28 +00:00
3c51149b5b yt-dlp-bot: deploy update to 21f1da5 2025-08-08 07:53:11 +00:00
9c2c2a4404 Merge pull request 'chore(deps): update helm release grafana to v9.3.1' (#26) from renovate/grafana-9.x into main
Reviewed-on: #26
2025-08-05 23:52:52 +00:00
382edf3fab Merge pull request 'chore(deps): update helm release gitea to v12.1.3' (#27) from renovate/gitea-12.x into main
Reviewed-on: #27
2025-08-05 00:35:45 +00:00
c2add28711 chore(deps): update helm release gitea to v12.1.3 2025-08-04 20:00:10 +00:00
613cfd272e chore(deps): update helm release grafana to v9.3.1 2025-08-04 16:00:09 +00:00
d0f6217431 gitea: minor fix 2025-08-03 09:05:41 -04:00
af0551b7ed gitea: enable registration 2025-08-03 09:03:14 -04:00
186ec362a7 yt-dlp-bot: deploy update to c0a9422 2025-08-02 19:46:24 +00:00
f10f0a737f Merge pull request 'chore(deps): update helm release grafana to v9.3.0' (#25) from renovate/grafana-9.x into main
Reviewed-on: #25
2025-07-25 22:12:41 +00:00
603dbb57b9 chore(deps): update helm release grafana to v9.3.0 2025-07-25 13:00:08 +00:00
f20bba9299 Merge pull request 'chore(deps): update helm release authentik to v2025.6.4' (#24) from renovate/authentik-2025.x into main
Reviewed-on: #24
2025-07-22 20:02:46 +00:00
36040d3b45 chore(deps): update helm release authentik to v2025.6.4 2025-07-22 14:00:10 +00:00
7076bd92fc yt-dlp-bot: deploy update to dbcc948 2025-07-22 00:17:40 +00:00
3cc014c882 Merge pull request 'chore(deps): update helm release gitea to v12.1.2' (#23) from renovate/gitea-12.x into main
Reviewed-on: #23
2025-07-19 16:54:53 +00:00
56a47e82f7 chore(deps): update helm release gitea to v12.1.2 2025-07-19 16:00:09 +00:00
0eaed9f3a4 Merge pull request 'chore(deps): update helm release wekan to v7.93.0' (#22) from renovate/wekan-7.x into main
Reviewed-on: #22
2025-07-18 15:43:37 +00:00
513853458f chore(deps): update helm release wekan to v7.93.0 2025-07-18 06:00:09 +00:00
33712ca36c yt-dlp-bot: deploy update to bffe801 2025-07-17 03:10:37 +00:00
df6d831db6 yt-dlp-bot: deploy update to 2025-07-17 03:07:52 +00:00
88009e3dcc gitea: change sendgrid to smtp2go 2025-07-14 13:09:58 -04:00
dc66685a59 vaultwarden: cm correction 2025-07-14 11:59:36 -04:00
d236dffa33 vaultwarden: switch sendgrid to smtp2go 2025-07-14 11:58:02 -04:00
498ca54ac2 yt-dlp-bot: update to 3ffb2e4 2025-07-12 16:55:39 -04:00
c13186129b renovate: add yt-dlp-bot 2025-07-11 22:20:36 -04:00
9c825e4e56 yt-dlp-bot: add 2025-07-11 22:14:09 -04:00
391a416a0a update gitignore 2025-07-10 16:46:36 -04:00
173cc0abc9 wekan: add 2025-07-10 16:46:17 -04:00
55b01276ba authentik: rvv :< 2025-07-08 19:05:05 -04:00
2bc78b0e32 authentik: would help if i used actual redis URLs 2025-07-08 18:58:20 -04:00
c0abe17770 authentik: DISREGARD THAT I SUC--cluster support should be there as long as the URLs are defined individually 2025-07-08 18:53:22 -04:00
3602d9fc43 authentik: rvv, does not support redis/valkey clusters 2025-07-08 18:34:14 -04:00
1cde43e4d6 authentik: only deploy valkey cluster, no migrate yet 2025-07-08 18:20:57 -04:00
dd01fba8df authentik: rvv redis update 2025-07-07 22:21:23 -04:00
c1ae4f0e5b authentik: fix redis config 2025-07-07 21:51:56 -04:00
e6fdac2408 authentik: migrate to valkey operator 2025-07-07 21:39:21 -04:00
febf7e7ead gitea: use valkey operator 2025-07-07 21:19:50 -04:00
ee627343cf Merge pull request 'chore(deps): update helm release grafana to v9.2.10' (#20) from renovate/grafana-9.x into main
Reviewed-on: #20
2025-07-04 22:03:04 +00:00
b6e712b55c chore(deps): update helm release grafana to v9.2.10 2025-07-04 22:00:09 +00:00
714e100cbd Merge pull request 'chore(deps): update helm release gitea to v12.1.1' (#18) from renovate/gitea-12.x into main
Reviewed-on: #18
2025-06-27 18:54:10 +00:00
02bb77efc1 Merge pull request 'chore(deps): update helm release grafana to v9.2.9' (#17) from renovate/grafana-9.x into main
Reviewed-on: #17
2025-06-27 15:12:58 +00:00
ae16663431 Merge pull request 'chore(deps): update helm release authentik to v2025.6.3' (#19) from renovate/authentik-2025.x into main
Reviewed-on: #19
2025-06-27 15:12:43 +00:00
0ac3ba5a9c chore(deps): update helm release authentik to v2025.6.3 2025-06-27 15:00:08 +00:00
593fa10231 chore(deps): update helm release gitea to v12.1.1 2025-06-27 14:00:09 +00:00
cf30a14ed9 chore(deps): update helm release grafana to v9.2.9 2025-06-27 13:00:08 +00:00
d63abb92da renovate: update config 2025-06-24 23:11:39 -04:00
359025d090 gitea: podannotations 2025-06-20 16:00:40 -04:00
c03f8c6f68 gitea: velero backup is supposed to be annotation, not label 2025-06-20 15:51:21 -04:00
16e38f18fb Revert "nextcloud: upgrade to 28.0.14"
This reverts commit d913cc08f5.
2025-06-20 14:38:30 -04:00
d913cc08f5 nextcloud: upgrade to 28.0.14 2025-06-20 14:31:52 -04:00
790ff3b070 nextcloud: correct version num 2025-06-20 12:44:16 -04:00
76d59daa0a nextcloud: rvv to 28.0.3.2 2025-06-20 12:41:26 -04:00
644ca1acea nextcloud: attempt upgrade again 2025-06-20 11:42:51 -04:00
5cd7107586 nextcloud: revert while imgpull errors are troubleshooted 2025-06-20 11:38:37 -04:00
6fcd89be47 nextcloud: possible issue with latest apache img 2025-06-20 11:36:27 -04:00
c894920b6a nextcloud: upgrade to 29.0.16 2025-06-20 11:31:13 -04:00
b050d7671c Merge pull request 'chore(deps): update helm release gitea to v12.1.0' (#13) from renovate/gitea-12.x into main
Reviewed-on: #13
2025-06-20 01:07:12 +00:00
dbc55fe713 gitea: expand replicas to 3 2025-06-19 21:05:11 -04:00
6d4eccd351 gitea: add to velero backup 2025-06-19 20:58:34 -04:00
a812d36ffa update ingress to reflect proper gitea url 2025-06-19 20:53:57 -04:00
dad2b4f07f Merge pull request 'chore(deps): update helm release grafana to v9.2.7' (#16) from renovate/grafana-9.x into main
Reviewed-on: #16
2025-06-18 21:30:56 +00:00
f691751e92 chore(deps): update helm release grafana to v9.2.7 2025-06-18 21:00:08 +00:00
9f7956e5a4 Merge pull request 'chore(deps): update helm release grafana to v9.2.6' (#15) from renovate/grafana-9.x into main
Reviewed-on: #15
2025-06-17 20:14:47 +00:00
47307c0ed8 chore(deps): update helm release grafana to v9.2.6 2025-06-17 18:00:09 +00:00
5324efa646 Merge pull request 'chore(deps): update helm release authentik to v2025.6.2' (#14) from renovate/authentik-2025.x into main
Reviewed-on: #14
2025-06-17 15:36:53 +00:00
4f396ac28a chore(deps): update helm release authentik to v2025.6.2 2025-06-17 08:00:08 +00:00
9af2ae75ea Merge pull request 'chore(deps): update helm release grafana to v9.2.4' (#12) from renovate/grafana-9.x into main
Reviewed-on: #12
2025-06-16 15:11:40 +00:00
933fd4fe8c chore(deps): update helm release gitea to v12.1.0 2025-06-16 15:00:34 +00:00
99491aacc7 chore(deps): update helm release grafana to v9.2.4 2025-06-16 13:00:08 +00:00
6ba0558e58 gitea: use ingressroutetcp 2025-06-15 20:17:07 -04:00
def5fb8813 rvv 2025-06-15 20:14:32 -04:00
a1462b732d gitea: curious test 2025-06-15 20:14:00 -04:00
a1f00a63b3 gitea: fix ingressroute manifest 2025-06-15 20:10:58 -04:00
7e9fd148d7 aaaa 2025-06-15 20:09:47 -04:00
2b528df50f gitea: fix ingressroute syntax 2025-06-15 20:08:39 -04:00
09e035cf03 gitea: add ssh ingressroute 2025-06-15 20:07:54 -04:00
08a85f7d8d add gitea 2025-06-15 19:19:11 -04:00
02a1cd04e4 Merge pull request 'chore(deps): update helm release authentik to v2025.6.1' (#10) from renovate/authentik-2025.x into main
Reviewed-on: #10
2025-06-15 14:15:53 +00:00
554ffd3a3e authentik: bring back up with new db 2025-06-15 10:10:04 -04:00
85626ef086 authentik: get rid of the templating all together, not necessary for app 2025-06-15 10:09:17 -04:00
767cf93797 authentik: does this escape work?! 2025-06-15 10:06:30 -04:00
591c663daf maybe if i actually edited the right file... 2025-06-15 10:05:40 -04:00
4da1f6030b authentik: try using template with single quote to prevent helm from thinking its part of a template 2025-06-15 10:02:50 -04:00
62f22dc55d authentik: proper escapes 2025-06-15 09:59:56 -04:00
9121acfbea authentik: try pw template with escapes 2025-06-15 09:58:50 -04:00
929fe1d782 authentik: troubleshooting 2025-06-15 09:53:51 -04:00
8767034c22 bring back online with new shared db 2025-06-15 09:49:19 -04:00
ac45bd8d74 authentik: maintenance: disable all replicas 2025-06-15 09:19:44 -04:00
6bb384180d Merge pull request 'chore(deps): update helm release grafana to v9.2.3' (#11) from renovate/grafana-9.x into main
Reviewed-on: #11
2025-06-13 15:14:59 +00:00
866996ad68 chore(deps): update helm release grafana to v9.2.3 2025-06-13 14:00:18 +00:00
59bd143157 chore(deps): update helm release authentik to v2025.6.1 2025-06-07 01:00:17 +00:00
eb1356aa67 Merge pull request 'chore(deps): update helm release grafana to v9.2.2' (#9) from renovate/grafana-9.x into main
Reviewed-on: #9
2025-06-04 19:04:44 +00:00
3a525e49be chore(deps): update helm release grafana to v9.2.2 2025-06-02 12:00:23 +00:00
16bd8aaffa immich: upgrade to v1.134.0 2025-05-27 15:28:24 -04:00
a4c7bb764b grafana: remove additional irrelevant/redundant values 2025-05-27 15:14:42 -04:00
4089400f58 immich: update deployment to use new db 2025-05-27 14:15:50 -04:00
7110b1878d grafana: change deploymentstrategy to recreate 2025-05-27 10:13:51 -04:00
a23d978668 grafana: cleanup values file 2025-05-27 10:12:17 -04:00
b4e3062480 add renovate 2025-05-27 10:02:36 -04:00
8be1db7ef9 Merge pull request 'chore(deps): update helm release grafana to v9.2.1' (#8) from renovate/grafana-9.x into main
Reviewed-on: #8
2025-05-27 13:47:22 +00:00
7bf065764a chore(deps): update helm release grafana to v9.2.1 2025-05-27 13:44:37 +00:00
5a4896927f immich: update apiVersion on external-secret from v1beta1 to v1 2025-05-26 10:15:46 -04:00
3f8819d4eb update externalsecrets to use v1 instead of v1beta1 2025-05-26 10:04:30 -04:00
31a2413834 Merge pull request 'chore(deps): update helm release grafana to v9.2.0' (#6) from renovate/grafana-9.x into main
Reviewed-on: #6
2025-05-24 11:41:20 +00:00
4b9bbe6a9e chore(deps): update helm release grafana to v9.2.0 2025-05-23 16:00:16 +00:00
77 changed files with 1942 additions and 611 deletions

.gitignore (vendored, 2 lines changed)

@@ -1 +1,3 @@
*-testing/
Chart.lock
charts/


@@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flaresolverr
spec:
replicas: 1
selector:
matchLabels:
app: flaresolverr
template:
metadata:
labels:
app: flaresolverr
spec:
containers:
- name: flaresolverr
image: ghcr.io/flaresolverr/flaresolverr:v3.4.1
resources:
limits:
memory: "4Gi"
cpu: "1"
requests:
memory: "2Gi"
cpu: "0.5"


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: flaresolverr
spec:
selector:
app: flaresolverr
ports:
- port: 8191
targetPort: 8191


@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
spec:
replicas: 1
selector:
matchLabels:
app: prowlarr
template:
metadata:
labels:
app: prowlarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: prowlarr
image: linuxserver/prowlarr:version-2.0.5.5160
volumeMounts:
- name: config
mountPath: /config
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: prowlarr-config


@@ -1,12 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: emby-config
name: prowlarr-config
spec:
storageClassName: weyma-shared
resources:
requests:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
- ReadWriteMany


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: prowlarr
spec:
selector:
app: prowlarr
ports:
- port: 9696
targetPort: 9696


@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
spec:
replicas: 1
selector:
matchLabels:
app: radarr
template:
metadata:
labels:
app: radarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: radarr
image: linuxserver/radarr:version-5.27.5.10198
volumeMounts:
- name: config
mountPath: /config
- name: downloads
mountPath: /mnt/Downloads
- name: movies
mountPath: /mnt/movies
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-config
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: downloads
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/syncthing-downloads


@@ -1,12 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: resilio-pvc
name: radarr-config
spec:
storageClassName: weyma-shared
resources:
requests:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
- ReadWriteMany

arr-stack/radarr/svc.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: radarr
spec:
selector:
app: radarr
ports:
- port: 7878
targetPort: 7878


@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
spec:
replicas: 1
selector:
matchLabels:
app: sonarr
template:
metadata:
labels:
app: sonarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: sonarr
image: linuxserver/sonarr:4.0.15
volumeMounts:
- name: config
mountPath: /config
- name: downloads
mountPath: /mnt/Downloads
- name: tv-shows
mountPath: /mnt/tv-shows
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-config
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: downloads
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/syncthing-downloads

arr-stack/sonarr/pvc.yaml (new file, 11 lines)

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-config
spec:
resources:
requests:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany

arr-stack/sonarr/svc.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: sonarr
spec:
selector:
app: sonarr
ports:
- port: 8989
targetPort: 8989


@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deluge-tunnel
spec:
selector:
matchLabels:
app: deluge-tunnel
template:
metadata:
labels:
app: deluge-tunnel
spec:
containers:
- name: deluge-tunnel
image: kroniak/ssh-client:3.21
command: ["/bin/sh", "-c", "ssh -o StrictHostKeyChecking=no weyma-talos@45.152.211.243 -p 2222 -L 0.0.0.0:58846:127.0.0.1:58846 -L 0.0.0.0:8112:127.0.0.1:8112 -N"]
volumeMounts:
- name: ssh-keys
mountPath: /root/.ssh
resources:
limits:
memory: "512Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "200m"
volumes:
- name: ssh-keys
secret:
defaultMode: 0400
secretName: ssh-keys


@@ -0,0 +1,28 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ssh-keys
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: deluge-ssh
metadataPolicy: None
property: private
secretKey: id_ed25519
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: deluge-ssh
metadataPolicy: None
property: public
secretKey: id_ed25519.pub
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: ssh-keys

arr-stack/tunnel/svc.yaml (new file, 14 lines)

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: deluge
spec:
selector:
app: deluge-tunnel
ports:
- port: 58846
targetPort: 58846
name: deluge
- port: 8112
targetPort: 8112
name: web


@@ -1,4 +1,4 @@
apiVersion: external-secrets.io/v1beta1
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: attic-secret


@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: authentik
version: 2025.4.1
version: 2025.10.2
repository: https://charts.goauthentik.io


@@ -25,59 +25,6 @@ authentik:
- name: cert-dubyatp-xyz
secret:
secretName: cert-dubyatp-xyz
postgresql:
enabled: true
image:
repository: bitnami/postgresql
tag: 15.8.0-debian-12-r18
auth:
username: authentik
database: authentik
existingSecret: "authentik-credentials"
secretKeys:
adminPasswordKey: "admin-password"
userPasswordKey: "user-password"
replicationPasswordKey: "replication-password"
primary:
podAnnotations:
backup.velero.io/backup-volumes: data
extendedConfiguration: |
max_connections = 500
resourcesPreset: "none"
persistence:
enabled: true
storageClass: weyma-shared
accessModes:
- ReadWriteOnce
readReplicas:
resourcesPreset: "none"
backup:
resourcesPreset: "none"
passwordUpdateJob:
resourcesPreset: "none"
volumePermissions:
resourcesPreset: "none"
metrics:
resourcesPreset: "none"
redis:
enabled: true
architecture: standalone
auth:
enabled: false
master:
resourcesPreset: "none"
podAnnotations:
backup.velero.io/backup-volumes: redis-data
replica:
resourcesPreset: "none"
sentinel:
resourcesPreset: "none"
metrics:
resourcesPreset: "none"
volumePermissions:
resourcesPreset: "none"
sysctl:
resourcesPreset: "none"
global:
env:
- name: AUTHENTIK_SECRET_KEY
@@ -85,11 +32,32 @@ authentik:
secretKeyRef:
name: authentik-credentials
key: authentik-secret-key
- name: AUTHENTIK_POSTGRESQL__HOST
value: pooler-weyma-rw.cloudnativepg.svc.cluster.local
- name: AUTHENTIK_POSTGRESQL__NAME
value: authentik
- name: AUTHENTIK_POSTGRESQL__USER
value: authentik
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-db-auth
key: password
- name: AUTHENTIK_EMAIL__FROM
value: authentik_dubyatp@em924671.dubyatp.xyz
- name: AUTHENTIK_EMAIL__HOST
value: mail.smtp2go.com
- name: AUTHENTIK_EMAIL__USE_TLS
value: "true"
- name: AUTHENTIK_EMAIL__USERNAME
value: authentik_dubyatp
- name: AUTHENTIK_EMAIL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-credentials
key: user-password
key: smtp-password
- name: AUTHENTIK_EMAIL__TIMEOUT
value: "30"
additionalObjects:
- apiVersion: networking.k8s.io/v1
kind: Ingress
@@ -124,7 +92,7 @@ authentik:
data:
tls.crt: ""
tls.key: ""
- apiVersion: external-secrets.io/v1beta1
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-credentials
@@ -153,3 +121,28 @@ authentik:
remoteRef:
key: authentik
property: user-password
- secretKey: smtp-password
remoteRef:
key: authentik
property: smtp-password
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-db-auth
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: cloudnativepg
metadataPolicy: None
property: authentik_pw
secretKey: password
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: authentik-db-auth


@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dispatcharr
spec:
selector:
matchLabels:
app: dispatcharr
template:
metadata:
labels:
app: dispatcharr
annotations:
backup.velero.io/backup-volumes: data
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists
nodeSelector:
kubernetes.io/hostname: weyma-talos-testw04
containers:
- name: dispatcharr
image: ghcr.io/dispatcharr/dispatcharr:0.8.0-amd64
env:
- name: DISPATCHARR_ENV
value: aio
- name: REDIS_HOST
value: localhost
- name: CELERY_BROKER_URL
value: redis://localhost:6379/0
- name: DISPATCHARR_LOG_LEVEL
value: info
- name: UWSGI_NICE_LEVEL
value: "-5"
- name: CELERY_NICE_LEVEL
value: "-5"
volumeMounts:
- name: dispatcharr-data
mountPath: /data
- name: dev-dri
mountPath: /dev/dri
resources:
limits:
memory: "3Gi"
cpu: "1"
requests:
memory: "256Mi"
cpu: "500m"
securityContext:
privileged: true
volumes:
- name: dispatcharr-data
persistentVolumeClaim:
claimName: dispatcharr
- name: dev-dri
hostPath:
path: /dev/dri

dispatcharr/ingress.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dispatcharr
labels:
app.kubernetes.io/name: dispatcharr
spec:
rules:
- host: dispatcharr.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: dispatcharr-svc
port:
number: 9191

dispatcharr/pvc.yaml (new file, 11 lines)

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: dispatcharr
spec:
resources:
requests:
storage: 20Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany

dispatcharr/svc.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: dispatcharr-svc
spec:
selector:
app: dispatcharr
ports:
- port: 9191
targetPort: 9191


@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: cert-dubyatp-xyz
annotations:
replicator.v1.mittwald.de/replicate-from: "cert-manager/cert-dubyatp-xyz"
replicator.v1.mittwald.de/replicated-keys: "tls.crt,tls.key"
data:
tls.crt: ""
tls.key: ""


@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: emby
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: emby
template:
metadata:
annotations:
backup.velero.io/backup-volumes: emby-config
labels:
app: emby
spec:
volumes:
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: emby-config
persistentVolumeClaim:
claimName: emby-config
- name: transcode-temp
emptyDir:
sizeLimit: 8Gi
medium: Memory
- name: dev-dri
hostPath:
path: /dev/dri
containers:
- name: emby
image: emby/embyserver:4.8.11.0
volumeMounts:
- name: tv-shows
mountPath: /mnt/tv-shows
- name: movies
mountPath: /mnt/movies
- name: emby-config
mountPath: /config
- name: transcode-temp
mountPath: /tmp/transcode
- name: dev-dri
mountPath: /dev/dri
env:
- name: UID
value: "1000"
- name: GID
value: "1000"
- name: GIDLIST
value: "100"
livenessProbe:
httpGet:
path: /
port: http
securityContext:
privileged: true
resources:
limits:
memory: 8Gi
cpu: '1'
requests:
memory: 4Gi
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: weyma-talos-testw04
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists


@@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: emby-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: cloudflarewarp@file
spec:
rules:
- host: emby.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: emby-http-svc
port:
number: 8096
tls:
- hosts:
- emby.dubyatp.xyz
secretName: cert-dubyatp-xyz


@@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: resilio-sync
spec:
selector:
matchLabels:
app: resilio-sync
template:
metadata:
labels:
app: resilio-sync
spec:
containers:
- name: resilio-sync
image: lscr.io/linuxserver/resilio-sync:3.0.0
volumeMounts:
- name: config
mountPath: /config
- name: tv-shows
mountPath: /sync/tv-shows
- name: movies
mountPath: /sync/movies
resources:
limits:
memory: "700Mi"
cpu: "500m"
volumes:
- name: config
persistentVolumeClaim:
claimName: resilio-pvc
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies


@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: emby-http-svc
spec:
type: ClusterIP
selector:
app: emby
ports:
- port: 8096
targetPort: 8096
---
apiVersion: v1
kind: Service
metadata:
name: emby-https-svc
spec:
type: ClusterIP
selector:
app: emby
ports:
- port: 8920
targetPort: 8920


@@ -1,33 +1,39 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-runner
annotations:
deployment.kubernetes.io/revision: "4"
labels:
app: act-runner
name: act-runner
namespace: gitea-runner
spec:
progressDeadlineSeconds: 600
replicas: 1
strategy:
type: Recreate
revisionHistoryLimit: 10
selector:
matchLabels:
app: gitea-runner
app: act-runner
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: gitea-runner
app: act-runner
spec:
restartPolicy: Always
volumes:
- name: docker-certs
emptyDir: {}
- name: runner-data
persistentVolumeClaim:
claimName: gitea-runner-pvc
containers:
- name: runner
image: gitea/act_runner:nightly
command: ["sh", "-c", "while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...'; sleep 5; done; /sbin/tini -- run.sh"]
- command:
- sh
- -c
- while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...';
sleep 5; done; /sbin/tini -- run.sh
env:
- name: DOCKER_HOST
value: tcp://127.0.0.1:2376
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
@@ -37,20 +43,37 @@ spec:
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: gitea-runner-token
key: registration-token
key: token
name: runner-secret
image: gitea/act_runner:nightly
imagePullPolicy: Always
name: runner
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
- name: daemon
image: docker:23.0.6-dind
env:
- mountPath: /certs
name: docker-certs
- mountPath: /data
name: runner-data
- env:
- name: DOCKER_TLS_CERTDIR
value: /certs
image: docker:23.0.6-dind
imagePullPolicy: IfNotPresent
name: daemon
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: docker-certs
mountPath: /certs
- mountPath: /certs
name: docker-certs
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: docker-certs
- name: runner-data
persistentVolumeClaim:
claimName: act-runner-vol


@@ -1,4 +1,4 @@
apiVersion: external-secrets.io/v1beta1
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gitea-runner-token

gitea/Chart.yaml (new file, 28 lines)

@@ -0,0 +1,28 @@
apiVersion: v2
name: gitea
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
dependencies:
- name: gitea
version: 12.4.0
repository: https://dl.gitea.com/charts/

gitea/values.yaml (new file, 188 lines)

@@ -0,0 +1,188 @@
gitea:
replicaCount: 3
ingress:
enabled: true
hosts:
- host: git.dubyatp.xyz
paths:
- path: /
tls:
- secretName: cert-dubyatp-xyz
hosts:
- git.dubyatp.xyz
persistence:
enabled: true
create: true
mount: true
claimName: gitea-shared-storage
size: 50Gi
accessModes:
- ReadWriteMany
storageClass: weyma-shared
deployment:
annotations:
backup.velero.io/backup-volumes: data
env:
- name: GITEA__database__PASSWD
valueFrom:
secretKeyRef:
key: password
name: gitea-db-auth
- name: GITEA__mailer__PASSWD
valueFrom:
secretKeyRef:
key: smtp_smtp2go
name: gitea-secrets
- name: GITEA__security__INTERNAL_TOKEN
valueFrom:
secretKeyRef:
key: internal_token
name: gitea-secrets
- name: GITEA__security__SECRET_KEY
valueFrom:
secretKeyRef:
key: secret_key
name: gitea-secrets
- name: GITEA__oauth2__JWT_SECRET
valueFrom:
secretKeyRef:
key: oauth2_jwt
name: gitea-secrets
gitea:
admin:
passwordMode: initialOnlyNoReset
podAnnotations:
backup.velero.io/backup-volumes: data
config:
database:
DB_TYPE: postgres
HOST: pooler-weyma-rw.cloudnativepg.svc.cluster.local
NAME: gitea
USER: gitea
server:
DISABLE_SSH: false
DOMAIN: git.dubyatp.xyz
ENABLE_PPROF: false
ROOT_URL: https://git.dubyatp.xyz
SSH_DOMAIN: git-ssh.dubyatp.xyz
SSH_LISTEN_PORT: 22
SSH_PORT: 22
START_SSH_SERVER: true
OFFLINE_MODE: false
service:
DISABLE_REGISTRATION: false
webhook:
ALLOWED_HOST_LIST: "drone.infra.dubyatp.xyz,argocd.infra.dubyatp.xyz,discord.com,10.0.0.0/8"
mailer:
ENABLED: true
FROM: gitea@em924671.dubyatp.xyz
PROTOCOL: smtps
SMTP_ADDR: mail.smtp2go.com
SMTP_PORT: 465
USER: gitea_dubyatp
security:
INSTALL_LOCK: true
metrics:
enabled: true
serviceMonitor:
enabled: true
extraDeploy:
- apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: gitea-ssh
spec:
entryPoints:
- gitssh
routes:
- match: HostSNI(`*`)
priority: 1
services:
- name: gitea-ssh
port: 22
- apiVersion: v1
kind: Secret
metadata:
name: cert-dubyatp-xyz
annotations:
replicator.v1.mittwald.de/replicate-from: "cert-manager/cert-dubyatp-xyz"
replicator.v1.mittwald.de/replicated-keys: "tls.crt,tls.key"
data:
tls.crt: ""
tls.key: ""
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gitea-db-auth
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: cloudnativepg
metadataPolicy: None
property: gitea_pw
secretKey: password
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: gitea-db-auth
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gitea-secrets
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: gitea
metadataPolicy: None
property: internal_token
secretKey: internal_token
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: gitea
metadataPolicy: None
property: oauth2_jwt
secretKey: oauth2_jwt
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: gitea
metadataPolicy: None
property: secret_key
secretKey: secret_key
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: gitea
metadataPolicy: None
property: smtp_smtp2go
secretKey: smtp_smtp2go
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: gitea
metadataPolicy: None
property: gitea_admin
secretKey: gitea_admin
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: gitea-secrets
postgresql-ha:
enabled: false
valkey-cluster:
enabled: true
valkey:
resourcesPreset: "small"


@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: grafana
version: 9.0.0
version: 10.2.0
repository: https://grafana.github.io/helm-charts


@@ -3,17 +3,8 @@ grafana:
existingSecret: grafana-admin
passwordKey: passwordKey
userKey: userKey
affinity: {}
alerting: {}
assertNoLeakedSecrets: true
automountServiceAccountToken: true
autoscaling:
behavior: {}
enabled: false
maxReplicas: 5
minReplicas: 1
targetCPU: "60"
targetMemory: ""
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -22,52 +13,16 @@ grafana:
seccompProfile:
type: RuntimeDefault
createConfigmap: true
dashboardProviders: {}
dashboards: {}
dashboardsConfigMaps: {}
datasources: {}
defaultCurlOptions: -skf
deploymentStrategy:
type: RollingUpdate
dnsConfig: {}
dnsPolicy: null
downloadDashboards:
env: {}
envFromSecret: ""
envValueFrom: {}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
downloadDashboardsImage:
pullPolicy: IfNotPresent
registry: docker.io
repository: curlimages/curl
sha: ""
tag: 8.9.1
enableKubeBackwardCompatibility: false
type: Recreate
enableServiceLinks: true
env: {}
envFromConfigMaps:
- name: grafana-env
envFromSecret: ""
envFromSecrets:
- name: grafana-secretenv
envRenderSecret: {}
envValueFrom: {}
extraConfigmapMounts: []
extraContainerVolumes: []
extraContainers: ""
extraEmptyDirMounts: []
extraExposePorts: []
extraInitContainers: []
extraLabels: {}
extraObjects:
- apiVersion: external-secrets.io/v1beta1
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: grafana-admin
@@ -95,7 +50,7 @@ grafana:
creationPolicy: Owner
deletionPolicy: Retain
name: grafana-admin
- apiVersion: external-secrets.io/v1beta1
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: grafana-secretenv
@@ -148,13 +103,6 @@ grafana:
data:
tls.crt: ""
tls.key: ""
extraSecretMounts: []
extraVolumeMounts: []
extraVolumes: []
global:
imagePullSecrets: []
imageRegistry: null
gossipPortName: gossip
grafana.ini:
analytics:
check_for_updates: true
@@ -170,93 +118,14 @@ grafana:
server:
domain: '{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ tpl (.Values.ingress.hosts
| first) . }}{{ else }}''''{{ end }}'
headlessService: false
hostAliases: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: grafana/grafana
sha: ""
tag: ""
imageRenderer:
affinity: {}
automountServiceAccountToken: false
autoscaling:
behavior: {}
enabled: false
maxReplicas: 5
minReplicas: 1
targetCPU: "60"
targetMemory: ""
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
seccompProfile:
type: RuntimeDefault
deploymentStrategy: {}
enabled: false
env:
HTTP_HOST: 0.0.0.0
XDG_CACHE_HOME: /tmp/.chromium
XDG_CONFIG_HOME: /tmp/.chromium
envValueFrom: {}
extraConfigmapMounts: []
extraSecretMounts: []
extraVolumeMounts: []
extraVolumes: []
grafanaProtocol: http
grafanaSubPath: ""
hostAliases: []
image:
pullPolicy: Always
pullSecrets: []
registry: docker.io
repository: grafana/grafana-image-renderer
sha: ""
tag: latest
networkPolicy:
extraIngressSelectors: []
limitEgress: false
limitIngress: true
nodeSelector: {}
podAnnotations: {}
podPortName: http
priorityClassName: ""
renderingCallbackURL: ""
replicas: 1
resources: {}
revisionHistoryLimit: 10
securityContext: {}
serverURL: ""
service:
appProtocol: ""
enabled: true
port: 8081
portName: http
targetPort: 8081
serviceAccountName: ""
serviceMonitor:
enabled: false
interval: 1m
labels: {}
path: /metrics
relabelings: []
scheme: http
scrapeTimeout: 30s
targetLabels: []
tlsConfig: {}
tolerations: []
ingress:
annotations: {}
enabled: true
extraPaths: []
hosts:
- grafana.infra.dubyatp.xyz
labels: {}
path: /
pathType: Prefix
tls:
@@ -265,13 +134,6 @@ grafana:
secretName: cert-dubyatp-xyz
initChownData:
enabled: true
image:
pullPolicy: IfNotPresent
registry: docker.io
repository: library/busybox
sha: ""
tag: 1.37.0
resources: {}
securityContext:
capabilities:
add:
@@ -283,11 +145,6 @@ grafana:
runAsUser: 0
seccompProfile:
type: RuntimeDefault
ldap:
config: ""
enabled: false
existingSecret: ""
lifecycleHooks: {}
livenessProbe:
failureThreshold: 10
httpGet:
@@ -295,227 +152,45 @@ grafana:
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
namespaceOverride: ""
networkPolicy:
allowExternal: true
egress:
blockDNSResolution: false
enabled: false
ports: []
to: []
enabled: false
explicitNamespacesSelector: {}
ingress: true
nodeSelector: {}
notifiers: {}
persistence:
accessModes:
- ReadWriteOnce
disableWarning: false
enabled: true
extraPvcLabels: {}
finalizers:
- kubernetes.io/pvc-protection
inMemory:
enabled: false
lookupVolumeName: true
size: 10Gi
type: pvc
volumeName: ""
plugins: []
podDisruptionBudget: {}
podPortName: grafana
podAnnotations:
backup.velero.io/backup-volumes: "storage"
rbac:
create: true
extraClusterRoleRules: []
extraRoleRules: []
namespaced: false
pspEnabled: false
pspUseAppArmor: false
readinessProbe:
httpGet:
path: /api/health
port: 3000
replicas: 1
resources: {}
revisionHistoryLimit: 10
route:
main:
additionalRules: []
annotations: {}
apiVersion: gateway.networking.k8s.io/v1
enabled: false
filters: []
hostnames: []
kind: HTTPRoute
labels: {}
matches:
- path:
type: PathPrefix
value: /
parentRefs: []
securityContext:
fsGroup: 472
runAsGroup: 472
runAsNonRoot: true
runAsUser: 472
service:
annotations: {}
appProtocol: ""
enabled: true
ipFamilies: []
ipFamilyPolicy: ""
labels: {}
loadBalancerClass: ""
loadBalancerIP: ""
loadBalancerSourceRanges: []
port: 80
portName: service
sessionAffinity: ""
targetPort: 3000
type: ClusterIP
serviceAccount:
automountServiceAccountToken: false
create: true
labels: {}
name: null
nameTest: null
serviceMonitor:
basicAuth: {}
enabled: false
interval: 30s
labels: {}
metricRelabelings: []
path: /metrics
relabelings: []
scheme: http
scrapeTimeout: 30s
targetLabels: []
tlsConfig: {}
shareProcessNamespace: false
sidecar:
alerts:
enabled: false
env: {}
extraMounts: []
initAlerts: false
label: grafana_alert
labelValue: ""
reloadURL: http://localhost:3000/api/admin/provisioning/alerting/reload
resource: both
resourceName: ""
script: null
searchNamespace: null
sizeLimit: {}
skipReload: false
watchMethod: WATCH
dashboards:
SCProvider: true
defaultFolderName: null
enabled: false
env: {}
envValueFrom: {}
extraMounts: []
folder: /tmp/dashboards
folderAnnotation: null
label: grafana_dashboard
labelValue: ""
provider:
allowUiUpdates: false
disableDelete: false
folder: ""
folderUid: ""
foldersFromFilesStructure: false
name: sidecarProvider
orgid: 1
type: file
reloadURL: http://localhost:3000/api/admin/provisioning/dashboards/reload
resource: both
resourceName: ""
script: null
searchNamespace: null
sizeLimit: {}
skipReload: false
watchMethod: WATCH
datasources:
enabled: false
env: {}
envValueFrom: {}
extraMounts: []
initDatasources: false
label: grafana_datasource
labelValue: ""
reloadURL: http://localhost:3000/api/admin/provisioning/datasources/reload
resource: both
resourceName: ""
script: null
searchNamespace: null
sizeLimit: {}
skipReload: false
watchMethod: WATCH
enableUniqueFilenames: false
image:
registry: quay.io
repository: kiwigrid/k8s-sidecar
sha: ""
tag: 1.30.3
imagePullPolicy: IfNotPresent
livenessProbe: {}
notifiers:
enabled: false
env: {}
extraMounts: []
initNotifiers: false
label: grafana_notifier
labelValue: ""
reloadURL: http://localhost:3000/api/admin/provisioning/notifications/reload
resource: both
resourceName: ""
script: null
searchNamespace: null
sizeLimit: {}
skipReload: false
watchMethod: WATCH
plugins:
enabled: false
env: {}
extraMounts: []
initPlugins: false
label: grafana_plugin
labelValue: ""
reloadURL: http://localhost:3000/api/admin/provisioning/plugins/reload
resource: both
resourceName: ""
script: null
searchNamespace: null
sizeLimit: {}
skipReload: false
watchMethod: WATCH
readinessProbe: {}
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
smtp:
existingSecret: ""
passwordKey: password
userKey: user
testFramework:
containerSecurityContext: {}
enabled: true
image:
registry: docker.io
repository: bats/bats
tag: 1.12.0
tag: 1.13.0
imagePullPolicy: IfNotPresent
resources: {}
securityContext: {}
tolerations: []
topologySpreadConstraints: []
useStatefulSet: false


@@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: immich-ml
image: ghcr.io/immich-app/immich-machine-learning:v1.132.3
image: ghcr.io/immich-app/immich-machine-learning:v1.134.0
volumeMounts:
- name: model-cache
mountPath: /cache
@@ -23,7 +23,7 @@ spec:
mountPath: /dev/dri
env:
- name: DB_HOSTNAME
value: "weyma-pgsql-rw.cloudnativepg.svc.cluster.local"
value: "immich-rw.cloudnativepg.svc.cluster.local"
- name: DB_DATABASE_NAME
value: "immich"
- name: DB_USERNAME


@@ -1,4 +1,4 @@
apiVersion: external-secrets.io/v1beta1
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: postgres-credentials


@@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: immich-server
image: ghcr.io/immich-app/immich-server:v1.132.3
image: ghcr.io/immich-app/immich-server:v1.134.0
volumeMounts:
- name: library
mountPath: /usr/src/app/upload
@@ -23,7 +23,7 @@ spec:
mountPath: /dev/dri
env:
- name: DB_HOSTNAME
value: "weyma-pgsql-rw.cloudnativepg.svc.cluster.local"
value: "immich-rw.cloudnativepg.svc.cluster.local"
- name: DB_DATABASE_NAME
value: "immich"
- name: DB_USERNAME

jellyfin/Chart.yaml (new file, 28 lines)

@@ -0,0 +1,28 @@
apiVersion: v2
name: jellyfin
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: "1.0"
dependencies:
- name: jellyfin
version: 2.5.0
repository: https://jellyfin.github.io/jellyfin-helm


@@ -0,0 +1,33 @@
{{- if and (.Values.jellyfin.metrics.enabled) (.Values.jellyfin.ingress.enabled) -}}
---
apiVersion: v1
kind: Service
metadata:
name: dummy-svc
namespace: {{ .Release.Namespace }}
spec:
selector:
app: dummy-svc
ports:
- protocol: TCP
port: 6767
targetPort: 6767
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: block-metrics
namespace: {{ .Release.Namespace }}
spec:
rules:
- host: {{ (index .Values.jellyfin.ingress.hosts 0).host }}
http:
paths:
- pathType: Prefix
path: "/metrics"
backend:
service:
name: dummy-svc
port:
number: 6767
{{- end }}


@@ -0,0 +1,26 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: emby-redirect
spec:
redirectRegex:
regex: ^https?://emby\.dubyatp\.xyz/(.*)$
replacement: https://jellyfin.dubyatp.xyz/${1}
permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: emby-redirect
spec:
entryPoints:
- websecure
- web
routes:
- kind: Rule
match: Host(`emby.dubyatp.xyz`)
middlewares:
- name: emby-redirect
services:
- name: noop@internal
kind: TraefikService


@@ -0,0 +1,11 @@
apiVersion: v1
data:
tls.crt:
tls.key:
kind: Secret
metadata:
annotations:
replicator.v1.mittwald.de/replicate-from: cert-manager/cert-dubyatp-xyz
replicator.v1.mittwald.de/replicated-keys: tls.crt,tls.key
name: cert-dubyatp-xyz
type: Opaque

jellyfin/values.yaml (new file, 73 lines)

@@ -0,0 +1,73 @@
jellyfin:
deploymentStrategy:
type: Recreate
ingress:
enabled: true
hosts:
- host: jellyfin.dubyatp.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: cert-dubyatp.xyz
hosts:
- jellyfin.dubyatp.xyz
persistence:
config:
size: 25Gi
media:
enabled: false
volumes:
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: dvr
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/DVR
- name: youtube-vids
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/youtube-vids
- name: transcode-temp
emptyDir:
sizeLimit: 8Gi
medium: Memory
- name: dev-dri
hostPath:
path: /dev/dri
metrics:
enabled: true
serviceMonitor:
enabled: true
volumeMounts:
- name: tv-shows
mountPath: /mnt/tv-shows
- name: movies
mountPath: /mnt/movies
- name: dvr
mountPath: /mnt/dvr
- name: youtube-vids
mountPath: /mnt/youtube-vids
- name: transcode-temp
mountPath: /tmp/transcode
- name: dev-dri
mountPath: /dev/dri
podAnnotations:
backup.velero.io/backup-volumes: config
securityContext:
privileged: true
nodeSelector:
kubernetes.io/hostname: weyma-talos-testw04
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists
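
These values pin Jellyfin to the node with the Intel iGPU: /dev/dri is mounted from the host (which is why the pod runs privileged), and the node affinity keys off the label Talos publishes for nodes that have the i915 system extension installed. For reference, the matching node metadata looks roughly like this (the label value is illustrative; the Exists operator above only checks that the key is present):

# Illustrative node labels on the transcoding node.
apiVersion: v1
kind: Node
metadata:
  name: weyma-talos-testw04
  labels:
    kubernetes.io/hostname: weyma-talos-testw04
    extensions.talos.dev/i915: v1.0.0   # example value; only existence matters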

netmaker/config.yaml

@@ -0,0 +1,25 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: netmaker-config
data:
SERVER_NAME: netmaker.infra.dubyatp.xyz
SERVER_API_CONN_STRING: api.netmaker.infra.dubyatp.xyz:443
SERVER_HTTP_HOST: api.netmaker.infra.dubyatp.xyz
API_PORT: "8081"
WG_QUICK_USERSPACE_IMPLEMENTATION: wireguard-go
DNS_MODE: "off"
DISPLAY_KEYS: "on"
DATABASE: postgres
SQL_HOST: "pooler-weyma-rw.cloudnativepg.svc.cluster.local"
SQL_PORT: "5432"
SQL_DB: "netmaker"
SQL_USER: "netmaker"
MQ_USERNAME: netmaker
CORS_ALLOWED_ORIGIN: '*'
SERVER_BROKER_ENDPOINT: "ws://mq:1883"
BROKER_ENDPOINT: "wss://broker.netmaker.infra.dubyatp.xyz"
PLATFORM: "Kubernetes"
VERBOSITY: "3"
K8s: "true"
CACHING_ENABLED: "false"

netmaker/ingress.yaml

@@ -0,0 +1,16 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: netmaker-api-ingress
spec:
rules:
- host: api.netmaker.infra.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: netmaker-rest
port:
number: 8081


@@ -0,0 +1,11 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: shared-certs-pvc
spec:
storageClassName: weyma-shared
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Mi


@@ -0,0 +1,38 @@
apiVersion: v1
data:
mosquitto.conf: |
per_listener_settings false
listener 8883
protocol websockets
allow_anonymous false
listener 1883
protocol websockets
allow_anonymous false
password_file /mosquitto/temp/password.txt
wait.sh: |
#!/bin/ash
encrypt_password() {
echo "${MQ_USERNAME}:${MQ_PASSWORD}" > /mosquitto/temp/password.txt
mosquitto_passwd -U /mosquitto/temp/password.txt
chmod 0700 /mosquitto/temp/password.txt
}
main(){
encrypt_password
echo "Starting MQ..."
# Run the main container command.
/docker-entrypoint.sh
/usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
}
main "${@}"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: mosquitto
app.kubernetes.io/name: mosquitto
name: mosquitto-config
namespace: netmaker


@@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mosquitto
spec:
progressDeadlineSeconds: 600
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: mosquitto
app.kubernetes.io/name: mosquitto
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/instance: mosquitto
app.kubernetes.io/name: mosquitto
spec:
containers:
- image: eclipse-mosquitto:2.0.22-openssl
imagePullPolicy: IfNotPresent
command: ["/mosquitto/config/wait.sh"]
livenessProbe:
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 8883
timeoutSeconds: 1
name: mosquitto
env:
- name: MQ_USERNAME
value: netmaker
- name: MQ_PASSWORD
valueFrom:
secretKeyRef:
key: mq_password
name: netmaker-secrets
ports:
- containerPort: 1883
name: mqtt
protocol: TCP
- containerPort: 8883
name: mqtt2
protocol: TCP
readinessProbe:
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 8883
timeoutSeconds: 1
resources: {}
startupProbe:
failureThreshold: 30
periodSeconds: 5
successThreshold: 1
tcpSocket:
port: 8883
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /mosquitto/config
name: mosquitto-config
- mountPath: /mosquitto/certs
name: shared-certs
- mountPath: /mosquitto/temp
name: mosquitto-temp
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- configMap:
name: mosquitto-config
defaultMode: 0755
name: mosquitto-config
- name: mosquitto-temp
emptyDir: {}
- name: shared-certs
persistentVolumeClaim:
claimName: shared-certs-pvc


@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mosquitto-ingress
labels:
app.kubernetes.io/name: mosquitto-ingress
spec:
rules:
- host: broker.netmaker.infra.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: mq
port:
number: 8883


@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: mq
namespace: netmaker
spec:
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: mqtt
- name: mqtt2
port: 8883
protocol: TCP
targetPort: mqtt2
selector:
app.kubernetes.io/instance: mosquitto
app.kubernetes.io/name: mosquitto
sessionAffinity: None
---
apiVersion: v1
kind: Service
metadata:
name: 'netmaker-mqtt'
spec:
externalTrafficPolicy: Cluster
type: NodePort
selector:
app.kubernetes.io/instance: mosquitto
app.kubernetes.io/name: mosquitto
ports:
- port: 31883
nodePort: 31883
protocol: TCP
targetPort: 8883
name: nm-mqtt


@@ -0,0 +1,21 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: postgres-pw
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: cloudnativepg
metadataPolicy: None
property: netmaker_pw
secretKey: password
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: postgres-pw

netmaker/secrets.yaml

@@ -0,0 +1,35 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: netmaker-secrets
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
metadataPolicy: None
property: master_key
secretKey: master_key
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
metadataPolicy: None
property: mq_password
secretKey: mq_password
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
metadataPolicy: None
property: turn_password
secretKey: turn_password
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: netmaker-secrets

netmaker/statefulset.yaml

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: netmaker
name: netmaker
spec:
replicas: 3
serviceName: netmaker-headless
selector:
matchLabels:
app: netmaker
template:
metadata:
labels:
app: netmaker
spec:
initContainers:
- name: init-sysctl
image: busybox
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args: ["sysctl -w net.ipv4.ip_forward=1 && sysctl -w net.ipv4.conf.all.src_valid_mark=1 && sysctl -w net.ipv6.conf.all.disable_ipv6=0 && sysctl -w net.ipv6.conf.all.forwarding=1"]
securityContext:
privileged: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- env:
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SQL_PASS
valueFrom:
secretKeyRef:
key: password
name: postgres-pw
- name: MASTER_KEY
valueFrom:
secretKeyRef:
key: master_key
name: netmaker-secrets
- name: MQ_PASSWORD
valueFrom:
secretKeyRef:
key: mq_password
name: netmaker-secrets
- name: TURN_SERVER_PASSWORD
valueFrom:
secretKeyRef:
key: turn_password
name: netmaker-secrets
envFrom:
- configMapRef:
name: netmaker-config
image: gravitl/netmaker:v1.1.0
imagePullPolicy: Always
name: netmaker
ports:
- containerPort: 8081
protocol: TCP
- containerPort: 31821
protocol: UDP
- containerPort: 31822
protocol: UDP
- containerPort: 31823
protocol: UDP
- containerPort: 31824
protocol: UDP
- containerPort: 31825
protocol: UDP
- containerPort: 31826
protocol: UDP
- containerPort: 31827
protocol: UDP
- containerPort: 31828
protocol: UDP
- containerPort: 31829
protocol: UDP
- containerPort: 31830
protocol: UDP
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/netmaker/
name: shared-certs
volumes:
- name: shared-certs
persistentVolumeClaim:
claimName: shared-certs-pvc
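
The privileged busybox init container switches on IPv4/IPv6 forwarding and src_valid_mark inside the pod's network namespace before netmaker starts. Those are namespaced sysctls, so in principle the same thing could be declared on the pod securityContext instead of shelling out, though none of them are on Kubernetes' safe list and would first have to be allow-listed via the kubelet's allowed-unsafe-sysctls setting; a hypothetical sketch, not part of this change:

# Hypothetical replacement for the init container; requires the kubelet to
# allow-list these sysctls as unsafe sysctls first.
securityContext:
  sysctls:
    - name: net.ipv4.ip_forward
      value: "1"
    - name: net.ipv4.conf.all.src_valid_mark
      value: "1"
    - name: net.ipv6.conf.all.disable_ipv6
      value: "0"
    - name: net.ipv6.conf.all.forwarding
      value: "1"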

netmaker/svc.yaml

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: 'netmaker-rest'
spec:
ports:
- name: rest
port: 8081
protocol: TCP
targetPort: 8081
selector:
app: 'netmaker'
sessionAffinity: None
type: ClusterIP


@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: netmaker-ui
spec:
replicas: 2
selector:
matchLabels:
app: netmaker-ui
template:
metadata:
labels:
app: netmaker-ui
spec:
containers:
- name: netmaker-ui
image: gravitl/netmaker-ui:v1.1.0
env:
- name: BACKEND_URL
value: 'https://api.netmaker.infra.dubyatp.xyz'
terminationGracePeriodSeconds: 15

netmaker/ui/ingress.yaml

@@ -0,0 +1,16 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: netmaker-ui-ingress
spec:
rules:
- host: dashboard.netmaker.infra.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: netmaker-ui
port:
number: 80

netmaker/ui/svc.yaml

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: 'netmaker-ui'
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: 'netmaker-ui'
sessionAffinity: None
type: 'ClusterIP'


@@ -1,4 +1,4 @@
-apiVersion: external-secrets.io/v1beta1
+apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: nextcloud-secret

peertube/bucket.yaml

@@ -0,0 +1,10 @@
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: peertube-bucket
namespace: peertube
spec:
generateBucketName: peertube
storageClassName: weyma-s3-bucket
additionalConfig:
maxSize: "100Gi"
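
The ObjectBucketClaim provisions an S3 bucket on the weyma-s3-bucket storage class, and under the OBC contract the provisioner creates a Secret and a ConfigMap named after the claim carrying the credentials and bucket coordinates; the PeerTube Deployment further down pulls the credential keys in via envFrom. The Secret's rough shape (values are generated at provisioning time) is:

# Illustrative shape of the provisioner-created Secret; a ConfigMap of the
# same name carries BUCKET_HOST, BUCKET_NAME and BUCKET_PORT.
apiVersion: v1
kind: Secret
metadata:
  name: peertube-bucket
  namespace: peertube
type: Opaque
stringData:
  AWS_ACCESS_KEY_ID: "<generated>"
  AWS_SECRET_ACCESS_KEY: "<generated>"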

peertube/config.yaml

@@ -0,0 +1,35 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: peertube-config
data:
PEERTUBE_INSTANCE_NAME: "dubyatp peertube"
PEERTUBE_INSTANCE_DESCRIPTION: "duby's peertube instance"
POSTGRES_USER: peertube
POSTGRES_DB: peertube
PEERTUBE_DB_USERNAME: peertube
PEERTUBE_DB_HOSTNAME: pooler-weyma-rw.cloudnativepg.svc.cluster.local
PEERTUBE_DB_PORT: "5432"
PEERTUBE_WEBSERVER_HOSTNAME: "tube.dubyatp.xyz"
PEERTUBE_TRUST_PROXY: '["127.0.0.1", "loopback", "172.18.0.0/16"]'
PEERTUBE_SMTP_USERNAME: "peertube_dubyatp"
PEERTUBE_SMTP_HOSTNAME: "mail.smtp2go.com"
PEERTUBE_SMTP_PORT: "465"
PEERTUBE_SMTP_TLS: "true"
PEERTUBE_SMTP_FROM: "peertube@em924671.dubyatp.xyz"
PEERTUBE_ADMIN_EMAIL: "me@williamtpeebles.com"
#PEERTUBE_OBJECT_STORAGE_ENABLED: "true"
#PEERTUBE_OBJECT_STORAGE_ENDPOINT: "https://weyma-s3.infra.dubyatp.xyz"
#PEERTUBE_OBJECT_STORAGE_REGION: ""
#PEERTUBE_OBJECT_STORAGE_STREAMING_PLAYLISTS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
#PEERTUBE_OBJECT_STORAGE_STREAMING_PLAYLISTS_PREFIX: "streaming/"
#PEERTUBE_OBJECT_STORAGE_WEB_VIDEOS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
#PEERTUBE_OBJECT_STORAGE_WEB_VIDEOS_PREFIX: "videos/"
#PEERTUBE_OBJECT_STORAGE_USER_EXPORTS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
#PEERTUBE_OBJECT_STORAGE_USER_EXPORTS_PREFIX: "exports/"
#PEERTUBE_OBJECT_STORAGE_ORIGINAL_VIDEO_FILES_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
#PEERTUBE_OBJECT_STORAGE_ORIGINAL_VIDEO_FILES_PREFIX: "original-videos/"
#PEERTUBE_OBJECT_STORAGE_CAPTIONS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
#PEERTUBE_OBJECT_STORAGE_CAPTIONS_PREFIX: "captions/"
#PEERTUBE_OBJECT_STORAGE_UPLOAD_ACL_PUBLIC: "public-read"
#PEERTUBE_OBJECT_STORAGE_UPLOAD_ACL_PRIVATE: "private"

peertube/deployment.yaml

@@ -0,0 +1,69 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: peertube
labels:
app: peertube
spec:
replicas: 1
selector:
matchLabels:
app: peertube
template:
metadata:
labels:
app: peertube
spec:
containers:
- name: peertube
image: chocobozzz/peertube:v7.2.3-bookworm
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: http
- containerPort: 443
name: https
- containerPort: 9000
name: peertube
- containerPort: 1935
name: rtmp
envFrom:
- secretRef:
name: peertube-secret
- secretRef:
name: peertube-bucket
- configMapRef:
name: peertube-config
env:
- name: PEERTUBE_REDIS_HOSTNAME
value: "localhost"
- name: PEERTUBE_REDIS_AUTH
value: ""
volumeMounts:
- name: peertube-data
mountPath: /data
resources:
requests:
cpu: "0.5"
memory: 1Gi
limits:
cpu: "1"
memory: 2Gi
- name: redis
image: redis:8.2.1-alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 6379
name: redis
resources:
requests:
cpu: "0.2"
memory: 256Mi
limits:
cpu: "0.5"
memory: 1Gi
volumes:
- name: peertube-data
persistentVolumeClaim:
claimName: peertube-data

peertube/ingress.yaml

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: peertube
labels:
app.kubernetes.io/name: peertube
spec:
rules:
- host: tube.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: peertube
port:
number: 9000

peertube/pvc.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: peertube-data
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 50Gi

peertube/secret.yaml

@@ -0,0 +1,42 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: peertube-secret
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: peertube
metadataPolicy: None
property: PEERTUBE_SECRET
secretKey: PEERTUBE_SECRET
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: peertube
metadataPolicy: None
property: PEERTUBE_DB_PASSWORD
secretKey: PEERTUBE_DB_PASSWORD
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: peertube
metadataPolicy: None
property: PEERTUBE_SMTP_PASSWORD
secretKey: PEERTUBE_SMTP_PASSWORD
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: peertube
metadataPolicy: None
property: POSTGRES_PASSWORD
secretKey: POSTGRES_PASSWORD
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: peertube-secret

peertube/service.yaml

@@ -0,0 +1,24 @@
kind: Service
apiVersion: v1
metadata:
name: peertube
spec:
selector:
app: peertube
ports:
- protocol: TCP
port: 80
targetPort: 80
name: http
- protocol: TCP
port: 25
targetPort: 25
name: smtp
- protocol: TCP
port: 9000
targetPort: 9000
name: peertube
- protocol: TCP
name: rtmp
port: 1935
targetPort: 1935

peertube/valkey.yaml

@@ -0,0 +1,16 @@
apiVersion: hyperspike.io/v1
kind: Valkey
metadata:
name: peertube-kv
labels:
app.kubernetes.io/instance: peertube
spec:
anonymousAuth: true
certIssuerType: ClusterIssuer
clusterDomain: cluster.local
clusterPreferredEndpointType: ip
nodes: 1
prometheus: false
replicas: 3
tls: false
volumePermissions: true


@@ -1,3 +1,6 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"kubernetes": {
"managerFilePatterns": ["deployment.yaml", "statefulset.yaml", "cron.yaml", "cronjob.yaml"]
}
}
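
With managerFilePatterns set, Renovate's kubernetes manager starts parsing plain manifests in the listed repositories (deployments, statefulsets and cron jobs) and proposes image bumps for container image lines like the ones throughout this diff. An illustrative match target, with placeholder names, whose shape mirrors the PeerTube deployment above:

# Illustrative manifest the kubernetes manager would scan; Renovate opens a PR
# when a newer matching redis tag is published.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example
spec:
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
        - name: redis
          image: redis:8.2.1-alpine   # the line the manager tracks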


@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: renovate-config
data:
config.json: |-
{
"repositories": [
"infrastructure/core-apps",
"infrastructure/db-operators",
"infrastructure/weyma-talos",
"williamp/dubyatp.xyz",
"williamp/yt-dlp-bot"
]
}


@@ -0,0 +1,54 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: renovate-bot
spec:
schedule: '@hourly'
concurrencyPolicy: Forbid
jobTemplate:
spec:
template:
spec:
containers:
- image: renovate/renovate:40.14.6
name: renovate-bot
env: # Platform and endpoint are plain values; the tokens below come from Kubernetes secrets.
- name: RENOVATE_PLATFORM
value: 'gitea'
- name: RENOVATE_ENDPOINT
value: 'https://git.dubyatp.xyz/api/v1'
- name: RENOVATE_TOKEN
valueFrom:
secretKeyRef:
key: gitea-pat
name: renovate-gitea-token
- name: RENOVATE_GITHUB_COM_TOKEN
valueFrom:
secretKeyRef:
key: github-com-pat
name: renovate-github-com-token
- name: RENOVATE_GIT_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: ssh-key
name: renovate-ssh-key
- name: RENOVATE_AUTODISCOVER
value: 'false'
- name: RENOVATE_BASE_DIR
value: '/tmp/renovate/'
- name: RENOVATE_CONFIG_FILE
value: '/opt/renovate/config.json'
- name: LOG_LEVEL
value: debug
volumeMounts:
- name: config-volume
mountPath: /opt/renovate/
- name: work-volume
mountPath: /tmp/renovate/
restartPolicy: Never
volumes:
- name: config-volume
configMap:
name: renovate-config
- name: work-volume
emptyDir: {}


@@ -0,0 +1,17 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: renovate-gitea-token
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: renovate-gitea-token
creationPolicy: Owner
data:
- secretKey: gitea-pat
remoteRef:
key: renovate
property: gitea-pat


@@ -0,0 +1,17 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: renovate-github-com-token
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: renovate-github-com-token
creationPolicy: Owner
data:
- secretKey: github-com-pat
remoteRef:
key: renovate
property: github-com-pat


@@ -0,0 +1,17 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: renovate-ssh-key
spec:
refreshInterval: 1h
secretStoreRef:
name: weyma-vault
kind: ClusterSecretStore
target:
name: renovate-ssh-key
creationPolicy: Owner
data:
- secretKey: ssh-key
remoteRef:
key: renovate
property: ssh-key


@@ -6,10 +6,10 @@ data:
DATA_FOLDER: config
DOMAIN: https://vaultwarden.dubyatp.xyz
SIGNUPS_ALLOWED: "false"
-  SMTP_FROM: bitwarden@em3532.williamtpeebles.com
+  SMTP_FROM: vaultwarden@em924671.dubyatp.xyz
SMTP_FROM_NAME: Vaultwarden
-  SMTP_HOST: smtp.sendgrid.net
-  SMTP_PORT: "587"
-  SMTP_SECURITY: starttls
+  SMTP_HOST: mail.smtp2go.com
+  SMTP_PORT: "2525"
+  SMTP_SECURITY: "off"
SMTP_TIMEOUT: "15"
-  SMTP_USERNAME: apikey
+  SMTP_USERNAME: vaultwarden_dubyatp


@@ -1,4 +1,4 @@
-apiVersion: external-secrets.io/v1beta1
+apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: vaultwarden-secrets
@@ -19,7 +19,11 @@ spec:
remoteRef:
key: vaultwarden
property: hibp_api_key
-  - secretKey: SMTP_PASSWORD
+  - secretKey: SMTP_PASSWORD_OLD
remoteRef:
key: vaultwarden
property: smtp_password
+  - secretKey: SMTP_PASSWORD
+    remoteRef:
+      key: vaultwarden
+      property: smtp_password_smtp2go


@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: yt-dlp-bot
spec:
replicas: 1
selector:
matchLabels:
app: yt-dlp-bot
template:
metadata:
labels:
app: yt-dlp-bot
spec:
containers:
- name: yt-dlp-bot
image: 'git.dubyatp.xyz/williamp/yt-dlp-bot:1ef217f'
env:
- name: OUT_PATH
value: /data/youtube-vids
- name: TEMP_PATH
value: /tmp/ytdlp-temp
envFrom:
- secretRef:
name: yt-dlp-discord-token
volumeMounts:
- name: youtube-vids
mountPath: /data/youtube-vids
- name: temp
mountPath: /tmp/ytdlp-temp
resources:
limits:
memory: "3Gi"
cpu: "1"
volumes:
- name: youtube-vids
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/youtube-vids
- name: temp
emptyDir:
medium: Memory
strategy:
type: Recreate
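
The temp volume is a RAM-backed emptyDir, and tmpfs usage counts against the container's 3Gi memory limit, so one large download can push the pod toward its limit. A hypothetical hardening, not part of this change, would be to cap the scratch space explicitly:

# Hypothetical cap on the RAM-backed scratch volume; 2Gi is an example value.
- name: temp
  emptyDir:
    medium: Memory
    sizeLimit: 2Gi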

yt-dlp-bot/secret.yaml

@@ -0,0 +1,21 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yt-dlp-discord-token
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: yt-dlp-bot
metadataPolicy: None
property: DISCORD_TOKEN
secretKey: DISCORD_TOKEN
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: yt-dlp-discord-token

zap2xml/bucket.yaml

@@ -0,0 +1,10 @@
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: zap2xml-bucket
namespace: zap2xml
spec:
generateBucketName: zap2xml
storageClassName: weyma-s3-bucket
additionalConfig:
maxSize: "1Gi"

zap2xml/config.yaml

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: zap2xml-s3config
data:
.s3cfg: |
[default]
access_key =
access_token =
add_encoding_exts =
add_headers =
bucket_location = US
ca_certs_file =
cache_file =
check_ssl_certificate = True
check_ssl_hostname = True
cloudfront_host = cloudfront.amazonaws.com
connection_max_age = 5
connection_pooling = True
content_disposition =
content_type =
default_mime_type = binary/octet-stream
delay_updates = False
delete_after = False
delete_after_fetch = False
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
expiry_date =
expiry_days =
expiry_prefix =
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase =
guess_mime_type = True
host_base = weyma-s3.infra.dubyatp.xyz
host_bucket =
human_readable_sizes = False
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
invalidate_on_cf = False
keep_dirs = False
kms_key =
limit = -1
limitrate = 0
list_allow_unordered = False
list_md5 = False
log_target_prefix =
long_listing = False
max_delete = -1
max_retries = 5
mime_type =
multipart_chunk_size_mb = 15
multipart_copy_chunk_size_mb = 1024
multipart_max_chunks = 10000
preserve_attrs = True
progress_meter = True
proxy_host =
proxy_port = 0
public_url_use_https = False
put_continue = False
recursive = False
recv_chunk = 65536
reduced_redundancy = False
requester_pays = False
restore_days = 1
restore_priority = Standard
secret_key =
send_chunk = 65536
server_side_encryption = False
signature_v2 = False
signurl_use_https = False
simpledb_host = sdb.amazonaws.com
skip_destination_validation = False
skip_existing = False
socket_timeout = 300
ssl_client_cert_file =
ssl_client_key_file =
stats = False
stop_on_error = False
storage_class =
throttle_max = 100
upload_id =
urlencoding_mode = normal
use_http_expect = False
use_https = True
use_mime_magic = True
verbosity = WARNING
website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
website_error =
website_index = index.html

zap2xml/cron.yaml

@@ -0,0 +1,87 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: zap2xml-dtv-02191
spec:
schedule: "0 */12 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: zap2xml
image: git.dubyatp.xyz/williamp/kube-zap2xml:c075fec
envFrom:
- secretRef:
name: zap2xml-bucket
env:
- name: LINEUP_ID
value: USA-DITV506-X
- name: POSTAL_CODE
value: "02191"
- name: TIMESPAN
value: "120"
- name: OUTPUT_FILE
value: /tmp/xmltv.xml
- name: PUBLIC_FILENAME
value: xmltv-directv-02191.xml
- name: S3_URL
value: s3://zap2xml-c134c9a7-a7a0-4113-997e-78e72ec3f576
volumeMounts:
- name: s3-config
mountPath: /root
- name: temp
mountPath: /tmp
restartPolicy: Never
volumes:
- name: s3-config
configMap:
name: zap2xml-s3config
- name: temp
emptyDir:
sizeLimit: 1Gi
medium: Memory
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: zap2xml-ota-02191
spec:
schedule: "30 */12 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: zap2xml
image: git.dubyatp.xyz/williamp/kube-zap2xml:c075fec
envFrom:
- secretRef:
name: zap2xml-bucket
env:
- name: LINEUP_ID
value: USA-OTA02191
- name: POSTAL_CODE
value: "02191"
- name: TIMESPAN
value: "120"
- name: OUTPUT_FILE
value: /tmp/xmltv.xml
- name: PUBLIC_FILENAME
value: xmltv-ota-02191.xml
- name: S3_URL
value: s3://zap2xml-c134c9a7-a7a0-4113-997e-78e72ec3f576
volumeMounts:
- name: s3-config
mountPath: /root
- name: temp
mountPath: /tmp
restartPolicy: Never
volumes:
- name: s3-config
configMap:
name: zap2xml-s3config
- name: temp
emptyDir:
sizeLimit: 1Gi
medium: Memory