Compare commits

..

1 Commits

Author SHA1 Message Date
e7eb048609 chore(deps): update helm release grafana to v9.4.2 2025-08-29 19:00:09 +00:00
77 changed files with 914 additions and 1363 deletions

View File

@@ -1,29 +0,0 @@
# Core Apps
**Production-grade application deployments for my Kubernetes homelab**
This repository contains the core applications deployed to my Kubernetes homelab. Applications are deployed using either Kubernetes manifests or Helm charts (with upstream subcharts and custom values).
**Why Helm?** I prefer Helm charts when upstream versions exist because Renovate can automatically track new chart versions, whereas image tags in raw manifests aren't always semantically versioned.
**GitOps Workflow:** This repository is monitored by ArgoCD and serves as the source of truth for deployments. Each top-level directory is its own ArgoCD Application, with subdirectories representing components within that application.
**Automated Commits:** Apps that I wrote/maintain directly (such as yt-dlp-bot and zap2xml) get their manifests automatically updated via an Actions workflow in their respective repositories
- `arr-stack/` - Arr Stack (manifests)
- `flaresolverr/` - Flaresolverr (captcha processor)
- `prowlarr/` - Prowlarr (indexer manager)
- `radarr/` - Radarr (movie media manager)
- `sonarr/` - Sonarr (TV series media manager)
- `tunnel/` - Custom SSH tunnel to my seedbox to securely communicate with Deluge
- `attic/` - Attic NixOS cache server (manifests)
- `authentik/` - [Authentik](https://auth.dubyatp.xyz) SSO server (Helm chart)
- `gitea/` - [Gitea](https://git.dubyatp.xyz) Git Server (Helm chart)
- `gitea-runner/` - Gitea Runner (manifests)
- `buildkitd/` - Docker Buildkitd build environment
- `grafana/` - [Grafana](https://grafana.dubyatp.xyz) observability dashboard (Helm chart)
- `jellyfin/` - [Jellyfin](https://jellyfin.dubyatp.xyz) media server (Helm chart)
- `renovate/` - [Renovate](https://git.dubyatp.xyz/renovate-bot) automated dependency manager (manifests)
- `vaultwarden/` - [Vaultwarden](https://vaultwarden.dubyatp.xyz) password manager (manifests)
- `whatismyip/` - [Simple "what is my IP" HTTP service](https://whatismyip.dubyatp.xyz) (manifests)
- `yt-dlp-bot/` - [yt-dlp bot](https://git.dubyatp.xyz/williamp/yt-dlp-bot) (manifests); a custom Discord bot i created for downloading and storing YouTube videos ad-hoc
- `zap2xml/` - [kube-zap2xml](https://git.dubyatp.xyz/williamp/kube-zap2xml) (manifests); modified version of zap2xml (zap2it TV listings scraper) designed for use as Kubernetes jobs and sends the result XMLTV format to a Rook-Ceph S3 bucket

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flaresolverr
spec:
replicas: 1
selector:
matchLabels:
app: flaresolverr
template:
metadata:
labels:
app: flaresolverr
spec:
containers:
- name: flaresolverr
image: ghcr.io/flaresolverr/flaresolverr:v3.4.1
resources:
requests:
memory: "2Gi"
cpu: "0.5"

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: flaresolverr
spec:
selector:
app: flaresolverr
ports:
- port: 8191
targetPort: 8191

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
spec:
replicas: 1
selector:
matchLabels:
app: prowlarr
template:
metadata:
labels:
app: prowlarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: prowlarr
image: linuxserver/prowlarr:version-2.0.5.5160
volumeMounts:
- name: config
mountPath: /config
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: prowlarr-config

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: prowlarr
spec:
selector:
app: prowlarr
ports:
- port: 9696
targetPort: 9696

View File

@@ -1,45 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
spec:
replicas: 1
selector:
matchLabels:
app: radarr
template:
metadata:
labels:
app: radarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: radarr
image: linuxserver/radarr:version-5.27.5.10198
volumeMounts:
- name: config
mountPath: /config
- name: downloads
mountPath: /mnt/Downloads
- name: movies
mountPath: /mnt/movies
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr-config
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: downloads
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/syncthing-downloads

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: radarr
spec:
selector:
app: radarr
ports:
- port: 7878
targetPort: 7878

View File

@@ -1,45 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
spec:
replicas: 1
selector:
matchLabels:
app: sonarr
template:
metadata:
labels:
app: sonarr
annotations:
backup.velero.io/backup-volumes: config
spec:
containers:
- name: sonarr
image: linuxserver/sonarr:4.0.15
volumeMounts:
- name: config
mountPath: /config
- name: downloads
mountPath: /mnt/Downloads
- name: tv-shows
mountPath: /mnt/tv-shows
resources:
limits:
memory: "1Gi"
cpu: "1"
requests:
memory: "512Mi"
cpu: "0.5"
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr-config
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: downloads
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/syncthing-downloads

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: sonarr
spec:
selector:
app: sonarr
ports:
- port: 8989
targetPort: 8989

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deluge-tunnel
spec:
selector:
matchLabels:
app: deluge-tunnel
template:
metadata:
labels:
app: deluge-tunnel
spec:
containers:
- name: deluge-tunnel
image: kroniak/ssh-client:3.21
command: ["/bin/sh", "-c", "ssh -o StrictHostKeyChecking=no weyma-talos@45.152.211.243 -p 2222 -L 0.0.0.0:58846:127.0.0.1:58846 -L 0.0.0.0:8112:127.0.0.1:8112 -N"]
volumeMounts:
- name: ssh-keys
mountPath: /root/.ssh
resources:
limits:
memory: "512Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "200m"
volumes:
- name: ssh-keys
secret:
defaultMode: 0400
secretName: ssh-keys

View File

@@ -1,28 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ssh-keys
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: deluge-ssh
metadataPolicy: None
property: private
secretKey: id_ed25519
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: deluge-ssh
metadataPolicy: None
property: public
secretKey: id_ed25519.pub
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
creationPolicy: Owner
deletionPolicy: Retain
name: ssh-keys

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: deluge
spec:
selector:
app: deluge-tunnel
ports:
- port: 58846
targetPort: 58846
name: deluge
- port: 8112
targetPort: 8112
name: web

View File

@@ -1,10 +0,0 @@
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: attic-bucket
namespace: attic
spec:
additionalConfig:
maxSize: 100Gi
bucketName: attic-bucket
storageClassName: weyma-s3-bucket

View File

@@ -1,36 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: attic-config
data:
server.toml: |
listen = "[::]:8080"
allowed-hosts = []
#api-endpoint = "https://nix-cache.dubyatp.xyz/"
[database]
url = "sqlite:///var/empty/.local/share/attic/server.db"
[storage]
path = "/data/.local/share/attic/storage"
type = "local"
#region = "us-east-1"
#bucket = "attic-bucket"
#endpoint = "https://weyma-s3.infra.dubyatp.xyz"
[chunking]
nar-size-threshold = 65536
min-size = 16384
avg-size = 65536
max-size = 262144
[compression]
type = "zstd"
[garbage-collection]
interval = "12 hours"
[jwt]
[jwt.signing]

View File

@@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: attic
spec:
replicas: 1
selector:
matchLabels:
app: attic
@@ -14,24 +13,17 @@ spec:
spec:
containers:
- name: attic
image: ghcr.io/zhaofengli/attic:c4ffb5e86e928572e867bd3f81545293313e0a08
image: ghcr.io/zhaofengli/attic:ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e
envFrom:
- secretRef:
name: attic-secret
- secretRef:
name: attic-bucket
volumeMounts:
- name: attic-pvc
mountPath: /var/empty/
mountPath: /var/empty
resources:
limits:
memory: "2Gi"
cpu: "500m"
- name: multitool
image: wbitt/network-multitool
volumeMounts:
- name: attic-pvc
mountPath: /var/empty/
volumes:
- name: attic-pvc
persistentVolumeClaim:

View File

@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: authentik
version: 2026.2.0
version: 2025.8.1
repository: https://charts.goauthentik.io

View File

@@ -15,35 +15,6 @@ authentik:
service:
labels:
metrics_enabled: "true"
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
httpGet:
path: "{{ .Values.authentik.web.path }}-/health/live/"
port: http
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
httpGet:
path: "{{ .Values.authentik.web.path }}-/health/ready/"
port: http
startupProbe:
failureThreshold: 60
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
httpGet:
path: "{{ .Values.authentik.web.path }}-/health/live/"
port: http
worker:
replicas: 3
volumeMounts:
@@ -54,6 +25,25 @@ authentik:
- name: cert-dubyatp-xyz
secret:
secretName: cert-dubyatp-xyz
redis:
enabled: true
architecture: standalone
auth:
enabled: false
master:
resourcesPreset: "none"
podAnnotations:
backup.velero.io/backup-volumes: redis-data
replica:
resourcesPreset: "none"
sentinel:
resourcesPreset: "none"
metrics:
resourcesPreset: "none"
volumePermissions:
resourcesPreset: "none"
sysctl:
resourcesPreset: "none"
global:
env:
- name: AUTHENTIK_SECRET_KEY
@@ -61,10 +51,8 @@ authentik:
secretKeyRef:
name: authentik-credentials
key: authentik-secret-key
- name: AUTHENTIK_POSTGRESQL__DISABLE_SERVER_SIDE_CURSORS
value: "true"
- name: AUTHENTIK_POSTGRESQL__HOST
value: pooler-weyma-rw-authentik.cloudnativepg.svc.cluster.local
value: weyma-pgsql-rw.cloudnativepg.svc.cluster.local
- name: AUTHENTIK_POSTGRESQL__NAME
value: authentik
- name: AUTHENTIK_POSTGRESQL__USER
@@ -74,37 +62,6 @@ authentik:
secretKeyRef:
name: authentik-db-auth
key: password
- name: AUTHENTIK_EMAIL__FROM
value: authentik_dubyatp@em924671.dubyatp.xyz
- name: AUTHENTIK_EMAIL__HOST
value: mail.smtp2go.com
- name: AUTHENTIK_EMAIL__USE_TLS
value: "true"
- name: AUTHENTIK_EMAIL__USERNAME
value: authentik_dubyatp
- name: AUTHENTIK_EMAIL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-credentials
key: smtp-password
- name: AUTHENTIK_EMAIL__TIMEOUT
value: "30"
- name: AUTHENTIK_STORAGE__BACKEND
value: "s3"
- name: AUTHENTIK_STORAGE__S3__ENDPOINT
value: "https://weyma-s3.infra.dubyatp.xyz"
- name: AUTHENTIK_STORAGE__S3__BUCKET_NAME
value: "authentik-files"
- name: AUTHENTIK_STORAGE__S3__ACCESS_KEY
valueFrom:
secretKeyRef:
name: authentik-files
key: AWS_ACCESS_KEY_ID
- name: AUTHENTIK_STORAGE__S3__SECRET_KEY
valueFrom:
secretKeyRef:
name: authentik-files
key: AWS_SECRET_ACCESS_KEY
additionalObjects:
- apiVersion: networking.k8s.io/v1
kind: Ingress
@@ -168,10 +125,6 @@ authentik:
remoteRef:
key: authentik
property: user-password
- secretKey: smtp-password
remoteRef:
key: authentik
property: smtp-password
- apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
@@ -192,13 +145,4 @@ authentik:
target:
creationPolicy: Owner
deletionPolicy: Retain
name: authentik-db-auth
- apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: authentik-files
spec:
additionalConfig:
maxSize: 20Gi
bucketName: authentik-files
storageClassName: weyma-s3-bucket
name: authentik-db-auth

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: cert-dubyatp-xyz
annotations:
replicator.v1.mittwald.de/replicate-from: "cert-manager/cert-dubyatp-xyz"
replicator.v1.mittwald.de/replicated-keys: "tls.crt,tls.key"
data:
tls.crt: ""
tls.key: ""

79
emby/deployment.yaml Normal file
View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: emby
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: emby
template:
metadata:
annotations:
backup.velero.io/backup-volumes: emby-config
labels:
app: emby
spec:
volumes:
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: emby-config
persistentVolumeClaim:
claimName: emby-config
- name: transcode-temp
emptyDir:
sizeLimit: 8Gi
medium: Memory
- name: dev-dri
hostPath:
path: /dev/dri
containers:
- name: emby
image: emby/embyserver:4.8.11.0
volumeMounts:
- name: tv-shows
mountPath: /mnt/tv-shows
- name: movies
mountPath: /mnt/movies
- name: emby-config
mountPath: /config
- name: transcode-temp
mountPath: /tmp/transcode
- name: dev-dri
mountPath: /dev/dri
env:
- name: UID
value: "1000"
- name: GID
value: "1000"
- name: GIDLIST
value: "100"
livenessProbe:
httpGet:
path: /
port: http
securityContext:
privileged: true
resources:
limits:
memory: 8Gi
cpu: '1'
requests:
memory: 4Gi
cpu: "500m"
nodeSelector:
kubernetes.io/hostname: weyma-talos-testw04
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists

22
emby/ingress.yaml Normal file
View File

@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: emby-ingress
annotations:
traefik.ingress.kubernetes.io/router.middlewares: cloudflarewarp@file
spec:
rules:
- host: emby.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: emby-http-svc
port:
number: 8096
tls:
- hosts:
- emby.dubyatp.xyz
secretName: cert-dubyatp-xyz

View File

@@ -1,11 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prowlarr-config
name: emby-config
spec:
storageClassName: weyma-shared
resources:
requests:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
- ReadWriteOnce

View File

@@ -1,11 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: radarr-config
name: resilio-pvc
spec:
storageClassName: weyma-shared
resources:
requests:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
- ReadWriteOnce

39
emby/resilio-sync.yaml Normal file
View File

@@ -0,0 +1,39 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: resilio-sync
spec:
selector:
matchLabels:
app: resilio-sync
template:
metadata:
labels:
app: resilio-sync
spec:
containers:
- name: resilio-sync
image: lscr.io/linuxserver/resilio-sync:3.0.0
volumeMounts:
- name: config
mountPath: /config
- name: tv-shows
mountPath: /sync/tv-shows
- name: movies
mountPath: /sync/movies
resources:
limits:
memory: "700Mi"
cpu: "500m"
volumes:
- name: config
persistentVolumeClaim:
claimName: resilio-pvc
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies

23
emby/svc.yaml Normal file
View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
name: emby-http-svc
spec:
type: ClusterIP
selector:
app: emby
ports:
- port: 8096
targetPort: 8096
---
apiVersion: v1
kind: Service
metadata:
name: emby-https-svc
spec:
type: ClusterIP
selector:
app: emby
ports:
- port: 8920
targetPort: 8920

View File

@@ -1,40 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: buildkitd
namespace: gitea-runner
spec:
progressDeadlineSeconds: 600
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app: buildkitd
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: buildkitd
spec:
containers:
- args:
- --addr
- tcp://0.0.0.0:1234
image: moby/buildkit:v0.27.1
imagePullPolicy: Always
name: buildkitd
ports:
- containerPort: 1234
protocol: TCP
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: buildkitd
namespace: gitea-runner
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- port: 1234
selector:
app: buildkitd

View File

@@ -1,41 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: runner-config
data:
config.yaml: |-
log:
level: info
runner:
file: /data/.runner
capacity: 1
env_file: .env
timeout: 3h
shutdown_timeout: 0s
insecure: false
fetch_timeout: 5s
fetch_interval: 2s
labels:
- "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"
cache:
enabled: true
dir: ""
host: ""
port: 0
external_server: ""
container:
network: "host"
privileged: false
options:
workdir_parent: /scratch
valid_volumes:
- /scratch/**
docker_host: ""
force_pull: true
force_rebuild: false
require_docker: false
docker_timeout: 0s
host:
workdir_parent:

View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "4"
labels:
app: act-runner
name: act-runner
namespace: gitea-runner
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: act-runner
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: act-runner
spec:
containers:
- command:
- sh
- -c
- while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...';
sleep 5; done; /sbin/tini -- run.sh
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
- name: GITEA_INSTANCE_URL
value: https://git.dubyatp.xyz
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
key: token
name: runner-secret
image: gitea/act_runner:nightly
imagePullPolicy: Always
name: runner
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /certs
name: docker-certs
- mountPath: /data
name: runner-data
- env:
- name: DOCKER_TLS_CERTDIR
value: /certs
image: docker:23.0.6-dind
imagePullPolicy: IfNotPresent
name: daemon
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /certs
name: docker-certs
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: docker-certs
- name: runner-data
persistentVolumeClaim:
claimName: act-runner-vol

View File

@@ -1,11 +1,12 @@
kind: PersistentVolumeClaim
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: shared-certs-pvc
name: gitea-runner-pvc
spec:
storageClassName: weyma-shared
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Mi
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
storageClassName: weyma-shared

View File

@@ -1,86 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: act-runner
namespace: gitea-runner
labels:
app: act-runner
spec:
serviceName: ""
selector:
matchLabels:
app: act-runner
replicas: 3
template:
metadata:
labels:
app: act-runner
spec:
initContainers:
- name: sysctl
image: busybox
securityContext:
privileged: true
command:
- sh
- -c
- echo 28633 > /proc/sys/user/max_user_namespaces
- name: chown-data
image: busybox
securityContext:
runAsUser: 0
command:
- sh
- -c
- chown -R 1000:1000 /data
volumeMounts:
- name: runner-data
mountPath: /data
containers:
- name: runner
image: gitea/act_runner:nightly-dind-rootless
imagePullPolicy: Always
env:
- name: CONFIG_FILE
value: /config/config.yaml
- name: DOCKER_HOST
value: unix:///run/user/1000/docker.sock
- name: GITEA_INSTANCE_URL
value: https://git.dubyatp.xyz
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
key: registration-token
name: gitea-runner-token
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: runner-config
mountPath: /config
- name: runner-data
mountPath: /data
- name: runner-scratch
mountPath: /scratch
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
volumes:
- name: runner-scratch
emptyDir:
medium: Memory
sizeLimit: 5Gi
- name: runner-config
configMap:
name: runner-config
volumeClaimTemplates:
- metadata:
name: runner-data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: weyma-shared
resources:
requests:
storage: 32Gi

View File

@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: gitea
version: 12.5.6
repository: https://weyma-s3.infra.dubyatp.xyz/helm-bucket-ea34bc44-ef19-480d-a16a-1e583991f123/charts/
version: 12.2.0
repository: https://dl.gitea.com/charts/

View File

@@ -56,9 +56,19 @@ gitea:
config:
database:
DB_TYPE: postgres
HOST: pooler-weyma-rw.cloudnativepg.svc.cluster.local
HOST: weyma-pgsql-rw.cloudnativepg.svc.cluster.local
NAME: gitea
USER: gitea
queue:
TYPE: redis
CONN_STR: redis+cluster://:@gitea-kv-headless.gitea.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
session:
PROVIDER: redis
PROVIDER_CONFIG: redis+cluster://:@gitea-kv-headless.gitea.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
cache:
ENABLED: 'true'
ADAPTER: redis
HOST: redis+cluster://:@gitea-kv-headless.gitea.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
server:
DISABLE_SSH: false
DOMAIN: git.dubyatp.xyz
@@ -70,7 +80,7 @@ gitea:
START_SSH_SERVER: true
OFFLINE_MODE: false
service:
DISABLE_REGISTRATION: true
DISABLE_REGISTRATION: false
webhook:
ALLOWED_HOST_LIST: "drone.infra.dubyatp.xyz,argocd.infra.dubyatp.xyz,discord.com,10.0.0.0/8"
mailer:
@@ -82,16 +92,21 @@ gitea:
USER: gitea_dubyatp
security:
INSTALL_LOCK: true
metrics:
enabled: true
serviceMonitor:
enabled: true
livenessProbe:
enabled: true
httpGet:
path: /api/healthz
port: 3000
extraDeploy:
- apiVersion: hyperspike.io/v1
kind: Valkey
metadata:
name: gitea-kv
spec:
anonymousAuth: true
certIssuerType: ClusterIssuer
clusterDomain: cluster.local
clusterPreferredEndpointType: ip
nodes: 1
prometheus: false
replicas: 3
tls: false
volumePermissions: true
- apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
@@ -188,6 +203,4 @@ gitea:
postgresql-ha:
enabled: false
valkey-cluster:
enabled: true
valkey:
resourcesPreset: "small"
enabled: false

View File

@@ -24,5 +24,5 @@ appVersion: "1.0"
dependencies:
- name: grafana
version: 10.5.15
version: 9.4.2
repository: https://grafana.github.io/helm-charts

View File

@@ -191,6 +191,6 @@ grafana:
image:
registry: docker.io
repository: bats/bats
tag: 1.13.0
tag: 1.12.0
imagePullPolicy: IfNotPresent
useStatefulSet: false

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: immich-config
data:
immich-config.yaml: |
trash:
enabled: true
days: 30

View File

@@ -1,18 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mosquitto-ingress
name: immich
labels:
app.kubernetes.io/name: mosquitto-ingress
name: immich
spec:
rules:
- host: broker.netmaker.infra.dubyatp.xyz
- host: immich.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: mq
name: immich
port:
number: 8883
number: 2283
tls:
- secretName: cert-dubyatp-xyz
hosts:
- immich.dubyatp.xyz

View File

@@ -1,11 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sonarr-config
name: immich-library
spec:
resources:
requests:
storage: 10Gi
storage: 50Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany

View File

@@ -0,0 +1,94 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-ml
spec:
selector:
matchLabels:
app: immich-ml
template:
metadata:
labels:
app: immich-ml
spec:
containers:
- name: immich-ml
image: ghcr.io/immich-app/immich-machine-learning:v1.134.0
volumeMounts:
- name: model-cache
mountPath: /cache
- name: config
mountPath: /config/immich-config.yaml
- name: dev-dri
mountPath: /dev/dri
env:
- name: DB_HOSTNAME
value: "immich-rw.cloudnativepg.svc.cluster.local"
- name: DB_DATABASE_NAME
value: "immich"
- name: DB_USERNAME
valueFrom:
secretKeyRef:
key: username
name: postgres-credentials
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: postgres-credentials
- name: REDIS_HOSTNAME
value: redis
- name: REDIS_PORT
value: "6379"
- name: IMMICH_PORT
value: "3003"
livenessProbe:
httpGet:
path: /ping
port: 3003
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /ping
port: 3003
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /ping
port: 3003
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 30
securityContext:
privileged: true
resources:
limits:
memory: "8Gi"
cpu: "2"
requests:
memory: "2Gi"
cpu: "500m"
volumes:
- name: model-cache
emptyDir:
sizeLimit: 10Gi
- name: config
configMap:
name: immich-config
- name: dev-dri
hostPath:
path: /dev/dri
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists

View File

@@ -1,7 +1,7 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: postgres-pw
name: postgres-credentials
spec:
data:
- remoteRef:
@@ -9,13 +9,17 @@ spec:
decodingStrategy: None
key: cloudnativepg
metadataPolicy: None
property: netmaker_pw
property: immich_pw
secretKey: password
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
name: weyma-vault
target:
template:
data:
username: immich
password: "{{ .password }}"
creationPolicy: Owner
deletionPolicy: Retain
name: postgres-pw
name: postgres-credentials

View File

@@ -0,0 +1,94 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-server
spec:
selector:
matchLabels:
app: immich-server
template:
metadata:
labels:
app: immich-server
spec:
containers:
- name: immich-server
image: ghcr.io/immich-app/immich-server:v1.134.0
volumeMounts:
- name: library
mountPath: /usr/src/app/upload
- name: config
mountPath: /config/immich-config.yaml
- name: dev-dri
mountPath: /dev/dri
env:
- name: DB_HOSTNAME
value: "immich-rw.cloudnativepg.svc.cluster.local"
- name: DB_DATABASE_NAME
value: "immich"
- name: DB_USERNAME
valueFrom:
secretKeyRef:
key: username
name: postgres-credentials
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: postgres-credentials
- name: REDIS_HOSTNAME
value: redis
- name: REDIS_PORT
value: "6379"
- name: IMMICH_PORT
value: "2283"
livenessProbe:
httpGet:
path: /api/server/ping
port: 2283
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/server/ping
port: 2283
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /api/server/ping
port: 2283
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 30
securityContext:
privileged: true
resources:
limits:
memory: "8Gi"
cpu: "2"
requests:
memory: "2Gi"
cpu: "500m"
volumes:
- name: library
persistentVolumeClaim:
claimName: immich-library
- name: config
configMap:
name: immich-config
- name: dev-dri
hostPath:
path: /dev/dri
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists

23
immich/immich-svc.yaml Normal file
View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Service
metadata:
name: immich
spec:
selector:
app: immich-server
ports:
- port: 2283
targetPort: 2283
name: http
---
apiVersion: v1
kind: Service
metadata:
name: immich-ml
spec:
selector:
app: immich-ml
ports:
- port: 3003
targetPort: 3003
name: http

View File

@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis
spec:
selector:
matchLabels:
app: redis
serviceName: redis
replicas: 1
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:latest
command: ["redis-server"]
args:
- "--port"
- "6379"
- "--dir"
- "/data"
- "--appendonly"
- "yes"
volumeMounts:
- name: data
mountPath: /data
volumeClaimTemplates:
- spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: rook-ceph-block
resources:
requests:
storage: 10Gi
metadata:
name: data

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: redis
spec:
selector:
app: redis
ports:
- port: 6379
targetPort: 6379

View File

@@ -1,33 +0,0 @@
{{- if and (.Values.jellyfin.metrics.enabled) (.Values.jellyfin.ingress.enabled) -}}
---
apiVersion: v1
kind: Service
metadata:
name: dummy-svc
namespace: {{ .Release.Namespace }}
spec:
selector:
app: dummy-svc
ports:
- protocol: TCP
port: 6767
targetPort: 6767
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: block-metrics
namespace: {{ .Release.Namespace }}
spec:
rules:
- host: {{ (index .Values.jellyfin.ingress.hosts 0).host }}
http:
paths:
- pathType: Prefix
path: "/metrics"
backend:
service:
name: dummy-svc
port:
number: 6767
{{- end }}

View File

@@ -1,26 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: emby-redirect
spec:
redirectRegex:
regex: ^https?://emby\.dubyatp\.xyz/(.*)$
replacement: https://jellyfin.dubyatp.xyz/${1}
permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: emby-redirect
spec:
entryPoints:
- websecure
- web
routes:
- kind: Rule
match: Host(`emby.dubyatp.xyz`)
middlewares:
- name: emby-redirect
services:
- name: noop@internal
kind: TraefikService

View File

@@ -1,11 +0,0 @@
apiVersion: v1
data:
tls.crt:
tls.key:
kind: Secret
metadata:
annotations:
replicator.v1.mittwald.de/replicate-from: cert-manager/cert-dubyatp-xyz
replicator.v1.mittwald.de/replicated-keys: tls.crt,tls.key
name: cert-dubyatp-xyz
type: Opaque

View File

@@ -1,73 +0,0 @@
jellyfin:
deploymentStrategy:
type: Recreate
ingress:
enabled: true
hosts:
- host: jellyfin.dubyatp.xyz
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: cert-dubyatp.xyz
hosts:
- jellyfin.dubyatp.xyz
persistence:
config:
size: 25Gi
media:
enabled: false
volumes:
- name: tv-shows
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/tv-shows
- name: movies
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/movies
- name: dvr
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/DVR
- name: youtube-vids
nfs:
server: 10.105.15.20
path: /mnt/hdd-pool/youtube-vids
- name: transcode-temp
emptyDir:
sizeLimit: 8Gi
medium: Memory
- name: dev-dri
hostPath:
path: /dev/dri
metrics:
enabled: true
serviceMonitor:
enabled: true
volumeMounts:
- name: tv-shows
mountPath: /mnt/tv-shows
- name: movies
mountPath: /mnt/movies
- name: dvr
mountPath: /mnt/dvr
- name: youtube-vids
mountPath: /mnt/youtube-vids
- name: transcode-temp
mountPath: /tmp/transcode
- name: dev-dri
mountPath: /dev/dri
podAnnotations:
backup.velero.io/backup-volumes: config
securityContext:
privileged: true
nodeSelector:
kubernetes.io/hostname: weyma-talos-testw04
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: extensions.talos.dev/i915
operator: Exists

View File

@@ -1,25 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: netmaker-config
data:
SERVER_NAME: netmaker.infra.dubyatp.xyz
SERVER_API_CONN_STRING: api.netmaker.infra.dubyatp.xyz:443
SERVER_HTTP_HOST: api.netmaker.infra.dubyatp.xyz
API_PORT: "8081"
WG_QUICK_USERSPACE_IMPLEMENTATION: wireguard-go
DNS_MODE: "off"
DISPLAY_KEYS: "on"
DATABASE: postgres
SQL_HOST: "pooler-weyma-rw.cloudnativepg.svc.cluster.local"
SQL_PORT: "5432"
SQL_DB: "netmaker"
SQL_USER: "netmaker"
MQ_USERNAME: netmaker
CORS_ALLOWED_ORIGIN: '*'
SERVER_BROKER_ENDPOINT: "ws://mq:1883"
BROKER_ENDPOINT: "wss://broker.netmaker.infra.dubyatp.xyz"
PLATFORM: "Kubernetes"
VERBOSITY: "3"
K8s: "true"
CACHING_ENABLED: "false"

View File

@@ -1,38 +0,0 @@
apiVersion: v1
data:
  # Broker config: two websocket listeners (8883 and 1883), anonymous access
  # disabled, credentials from the password file generated by wait.sh below.
  mosquitto.conf: |
    per_listener_settings false
    listener 8883
    protocol websockets
    allow_anonymous false
    listener 1883
    protocol websockets
    allow_anonymous false
    password_file /mosquitto/temp/password.txt
  # Entrypoint wrapper: hashes MQ_USERNAME/MQ_PASSWORD into the password file
  # with mosquitto_passwd, then starts the broker with the config above.
  wait.sh: |
    #!/bin/ash
    encrypt_password() {
      echo "${MQ_USERNAME}:${MQ_PASSWORD}" > /mosquitto/temp/password.txt
      mosquitto_passwd -U /mosquitto/temp/password.txt
      chmod 0700 /mosquitto/temp/password.txt
    }
    main(){
      encrypt_password
      echo "Starting MQ..."
      # Run the main container command.
      /docker-entrypoint.sh
      /usr/sbin/mosquitto -c /mosquitto/config/mosquitto.conf
    }
    main "${@}"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/instance: mosquitto
    app.kubernetes.io/name: mosquitto
  name: mosquitto-config
  namespace: netmaker

View File

@@ -1,83 +0,0 @@
# Eclipse Mosquitto MQTT broker used by Netmaker.
# The container runs wait.sh (from the mosquitto-config ConfigMap), which
# generates the broker password file from MQ_USERNAME/MQ_PASSWORD before
# starting mosquitto; all probes check the websocket listener on 8883.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mosquitto
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: mosquitto
      app.kubernetes.io/name: mosquitto
  # Recreate: only one broker instance should own the shared-certs volume
  # and password file at a time.
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: mosquitto
        app.kubernetes.io/name: mosquitto
    spec:
      containers:
      - image: eclipse-mosquitto:2.0.22-openssl
        imagePullPolicy: IfNotPresent
        command: ["/mosquitto/config/wait.sh"]
        livenessProbe:
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 8883
          timeoutSeconds: 1
        name: mosquitto
        env:
        - name: MQ_USERNAME
          value: netmaker
        - name: MQ_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mq_password
              name: netmaker-secrets
        ports:
        - containerPort: 1883
          name: mqtt
          protocol: TCP
        - containerPort: 8883
          name: mqtt2
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          tcpSocket:
            port: 8883
          timeoutSeconds: 1
        resources: {}
        startupProbe:
          failureThreshold: 30
          periodSeconds: 5
          successThreshold: 1
          tcpSocket:
            port: 8883
          timeoutSeconds: 1
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /mosquitto/config
          name: mosquitto-config
        - mountPath: /mosquitto/certs
          name: shared-certs
        - mountPath: /mosquitto/temp
          name: mosquitto-temp
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      volumes:
      # defaultMode 0755 so wait.sh is executable as the container command.
      - configMap:
          name: mosquitto-config
          defaultMode: 0755
        name: mosquitto-config
      # FIX: was a bare "emptyDir:" (YAML null, not an EmptyDirVolumeSource);
      # an explicit empty mapping selects the default emptyDir settings.
      - name: mosquitto-temp
        emptyDir: {}
      - name: shared-certs
        persistentVolumeClaim:
          claimName: shared-certs-pvc

View File

@@ -1,36 +0,0 @@
# In-cluster broker Service; "mq" matches SERVER_BROKER_ENDPOINT
# (ws://mq:1883) in netmaker-config.
apiVersion: v1
kind: Service
metadata:
  name: mq
  namespace: netmaker
spec:
  ports:
  - name: mqtt
    port: 1883
    protocol: TCP
    targetPort: mqtt
  - name: mqtt2
    port: 8883
    protocol: TCP
    targetPort: mqtt2
  selector:
    app.kubernetes.io/instance: mosquitto
    app.kubernetes.io/name: mosquitto
  sessionAffinity: None
---
# NodePort exposure of the 8883 listener for clients outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: 'netmaker-mqtt'
spec:
  externalTrafficPolicy: Cluster
  type: NodePort
  selector:
    app.kubernetes.io/instance: mosquitto
    app.kubernetes.io/name: mosquitto
  ports:
  # Fixed nodePort 31883 (within the default 30000-32767 range).
  - port: 31883
    nodePort: 31883
    protocol: TCP
    targetPort: 8883
    name: nm-mqtt

View File

@@ -1,95 +0,0 @@
# Netmaker server StatefulSet. Each replica derives its NODE_ID from the pod
# name; WireGuard network listeners use UDP ports 31821-31830.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: netmaker
  name: netmaker
spec:
  # All replicas share the shared-certs-pvc claim, so it must be RWX.
  replicas: 3
  serviceName: netmaker-headless
  selector:
    matchLabels:
      app: netmaker
  template:
    metadata:
      labels:
        app: netmaker
    spec:
      # Privileged init container enables IPv4/IPv6 forwarding and
      # src_valid_mark on the node before the server starts.
      initContainers:
      - name: init-sysctl
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["/bin/sh", "-c"]
        args: ["sysctl -w net.ipv4.ip_forward=1 && sysctl -w net.ipv4.conf.all.src_valid_mark=1 && sysctl -w net.ipv6.conf.all.disable_ipv6=0 && sysctl -w net.ipv6.conf.all.forwarding=1"]
        securityContext:
          privileged: true
      # NOTE(review): ClusterFirstWithHostNet normally accompanies
      # hostNetwork: true, which is not set here — confirm intent.
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - env:
        # Stable per-replica identity from the pod name.
        - name: NODE_ID
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: SQL_PASS
          valueFrom:
            secretKeyRef:
              key: password
              name: postgres-pw
        - name: MASTER_KEY
          valueFrom:
            secretKeyRef:
              key: master_key
              name: netmaker-secrets
        - name: MQ_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mq_password
              name: netmaker-secrets
        - name: TURN_SERVER_PASSWORD
          valueFrom:
            secretKeyRef:
              key: turn_password
              name: netmaker-secrets
        # Non-secret settings come from the netmaker-config ConfigMap.
        envFrom:
        - configMapRef:
            name: netmaker-config
        image: gravitl/netmaker:v1.4.0
        imagePullPolicy: Always
        name: netmaker
        ports:
        # REST API (matches API_PORT in netmaker-config).
        - containerPort: 8081
          protocol: TCP
        # WireGuard UDP port range for managed networks.
        - containerPort: 31821
          protocol: UDP
        - containerPort: 31822
          protocol: UDP
        - containerPort: 31823
          protocol: UDP
        - containerPort: 31824
          protocol: UDP
        - containerPort: 31825
          protocol: UDP
        - containerPort: 31826
          protocol: UDP
        - containerPort: 31827
          protocol: UDP
        - containerPort: 31828
          protocol: UDP
        - containerPort: 31829
          protocol: UDP
        - containerPort: 31830
          protocol: UDP
        resources: {}
        # WireGuard interface management needs raw/admin net capabilities.
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
        volumeMounts:
        - mountPath: /etc/netmaker/
          name: shared-certs
      volumes:
      - name: shared-certs
        persistentVolumeClaim:
          claimName: shared-certs-pvc

View File

@@ -1,14 +0,0 @@
# ClusterIP Service in front of the Netmaker REST API (port 8081),
# exposed externally via the api.netmaker ingress.
apiVersion: v1
kind: Service
metadata:
  name: 'netmaker-rest'
spec:
  ports:
  - name: rest
    port: 8081
    protocol: TCP
    targetPort: 8081
  selector:
    app: 'netmaker'
  sessionAffinity: None
  type: ClusterIP

View File

@@ -1,21 +0,0 @@
# Stateless Netmaker dashboard; talks to the REST API via BACKEND_URL.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: netmaker-ui
spec:
  replicas: 2
  selector:
    matchLabels:
      app: netmaker-ui
  template:
    metadata:
      labels:
        app: netmaker-ui
    spec:
      containers:
      - name: netmaker-ui
        image: gravitl/netmaker-ui:v1.1.0
        env:
        # Public API endpoint (served by the netmaker-api ingress).
        - name: BACKEND_URL
          value: 'https://api.netmaker.infra.dubyatp.xyz'
      terminationGracePeriodSeconds: 15

View File

@@ -1,16 +0,0 @@
# Routes the dashboard hostname to the netmaker-ui Service (port 80).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: netmaker-ui-ingress
spec:
  rules:
  - host: dashboard.netmaker.infra.dubyatp.xyz
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: netmaker-ui
            port:
              number: 80

View File

@@ -1,13 +0,0 @@
# ClusterIP Service for the dashboard pods (plain HTTP on 80).
apiVersion: v1
kind: Service
metadata:
  name: 'netmaker-ui'
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: 'netmaker-ui'
  sessionAffinity: None
  type: 'ClusterIP'

View File

@@ -1,10 +1,10 @@
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: zap2xml-bucket
namespace: zap2xml
name: peertube-bucket
namespace: peertube
spec:
generateBucketName: zap2xml
generateBucketName: peertube
storageClassName: weyma-s3-bucket
additionalConfig:
maxSize: "1Gi"
maxSize: "100Gi"

35
peertube/config.yaml Normal file
View File

@@ -0,0 +1,35 @@
# Non-secret PeerTube settings; injected via envFrom in the Deployment.
# Secrets (DB/SMTP passwords, PEERTUBE_SECRET) come from peertube-secret.
apiVersion: v1
kind: ConfigMap
metadata:
  name: peertube-config
data:
  PEERTUBE_INSTANCE_NAME: "dubyatp peertube"
  PEERTUBE_INSTANCE_DESCRIPTION: "duby's peertube instance"
  POSTGRES_USER: peertube
  POSTGRES_DB: peertube
  PEERTUBE_DB_USERNAME: peertube
  PEERTUBE_DB_HOSTNAME: weyma-pgsql-rw.cloudnativepg.svc.cluster.local
  PEERTUBE_DB_PORT: "5432"
  PEERTUBE_WEBSERVER_HOSTNAME: "tube.dubyatp.xyz"
  PEERTUBE_TRUST_PROXY: '["127.0.0.1", "loopback", "172.18.0.0/16"]'
  # SMTP relay via smtp2go on the implicit-TLS port.
  PEERTUBE_SMTP_USERNAME: "peertube_dubyatp"
  PEERTUBE_SMTP_HOSTNAME: "mail.smtp2go.com"
  PEERTUBE_SMTP_PORT: "465"
  PEERTUBE_SMTP_TLS: "true"
  PEERTUBE_SMTP_FROM: "peertube@em924671.dubyatp.xyz"
  PEERTUBE_ADMIN_EMAIL: "me@williamtpeebles.com"
  # S3 object storage is currently disabled; the peertube-bucket OBC secret is
  # still injected by the Deployment for when these settings are re-enabled.
  #PEERTUBE_OBJECT_STORAGE_ENABLED: "true"
  #PEERTUBE_OBJECT_STORAGE_ENDPOINT: "https://weyma-s3.infra.dubyatp.xyz"
  #PEERTUBE_OBJECT_STORAGE_REGION: ""
  #PEERTUBE_OBJECT_STORAGE_STREAMING_PLAYLISTS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
  #PEERTUBE_OBJECT_STORAGE_STREAMING_PLAYLISTS_PREFIX: "streaming/"
  #PEERTUBE_OBJECT_STORAGE_WEB_VIDEOS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
  #PEERTUBE_OBJECT_STORAGE_WEB_VIDEOS_PREFIX: "videos/"
  #PEERTUBE_OBJECT_STORAGE_USER_EXPORTS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
  #PEERTUBE_OBJECT_STORAGE_USER_EXPORTS_PREFIX: "exports/"
  #PEERTUBE_OBJECT_STORAGE_ORIGINAL_VIDEO_FILES_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
  #PEERTUBE_OBJECT_STORAGE_ORIGINAL_VIDEO_FILES_PREFIX: "original-videos/"
  #PEERTUBE_OBJECT_STORAGE_CAPTIONS_BUCKET_NAME: "peertube-953221d2-7649-48b2-b79f-5a9e59daedbb"
  #PEERTUBE_OBJECT_STORAGE_CAPTIONS_PREFIX: "captions/"
  #PEERTUBE_OBJECT_STORAGE_UPLOAD_ACL_PUBLIC: "public-read"
  #PEERTUBE_OBJECT_STORAGE_UPLOAD_ACL_PRIVATE: "private"

69
peertube/deployment.yaml Normal file
View File

@@ -0,0 +1,69 @@
# PeerTube with a Redis sidecar (reached on localhost, no auth).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: peertube
  labels:
    app: peertube
spec:
  replicas: 1
  selector:
    matchLabels:
      app: peertube
  template:
    metadata:
      labels:
        app: peertube
    spec:
      containers:
      - name: peertube
        image: chocobozzz/peertube:v7.2.3-bookworm
        imagePullPolicy: IfNotPresent
        ports:
        # NOTE(review): the ingress targets 9000 (the app port); confirm the
        # container actually listens on 80/443 or drop those declarations.
        - containerPort: 80
          name: http
        - containerPort: 443
          name: https
        - containerPort: 9000
          name: peertube
        - containerPort: 1935
          name: rtmp
        envFrom:
        # peertube-secret: app secrets; peertube-bucket: OBC S3 credentials
        # (object storage currently disabled in peertube-config).
        - secretRef:
            name: peertube-secret
        - secretRef:
            name: peertube-bucket
        - configMapRef:
            name: peertube-config
        env:
        # Redis runs as the sidecar below, unauthenticated on localhost.
        - name: PEERTUBE_REDIS_HOSTNAME
          value: "localhost"
        - name: PEERTUBE_REDIS_AUTH
          value: ""
        volumeMounts:
        - name: peertube-data
          mountPath: /data
        resources:
          requests:
            cpu: "0.5"
            memory: 1Gi
          limits:
            cpu: "1"
            memory: 2Gi
      - name: redis
        image: redis:8.2.1-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 6379
          name: redis
        resources:
          requests:
            cpu: "0.2"
            memory: 256Mi
          limits:
            cpu: "0.5"
            memory: 1Gi
      volumes:
      - name: peertube-data
        persistentVolumeClaim:
          claimName: peertube-data

View File

@@ -1,16 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: netmaker-api-ingress
name: peertube
labels:
app.kubernetes.io/name: peertube
spec:
rules:
- host: api.netmaker.infra.dubyatp.xyz
- host: tube.dubyatp.xyz
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: netmaker-rest
name: peertube
port:
number: 8081
number: 9000

10
peertube/pvc.yaml Normal file
View File

@@ -0,0 +1,10 @@
# Persistent storage for /data (videos, thumbnails, config) in the
# peertube Deployment; no storageClassName, so the cluster default is used.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: peertube-data
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 50Gi

View File

@@ -1,30 +1,37 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: netmaker-secrets
name: peertube-secret
spec:
data:
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
key: peertube
metadataPolicy: None
property: master_key
secretKey: master_key
property: PEERTUBE_SECRET
secretKey: PEERTUBE_SECRET
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
key: peertube
metadataPolicy: None
property: mq_password
secretKey: mq_password
property: PEERTUBE_DB_PASSWORD
secretKey: PEERTUBE_DB_PASSWORD
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: netmaker
key: peertube
metadataPolicy: None
property: turn_password
secretKey: turn_password
property: PEERTUBE_SMTP_PASSWORD
secretKey: PEERTUBE_SMTP_PASSWORD
- remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: peertube
metadataPolicy: None
property: POSTGRES_PASSWORD
secretKey: POSTGRES_PASSWORD
refreshInterval: 1h
secretStoreRef:
kind: ClusterSecretStore
@@ -32,4 +39,4 @@ spec:
target:
creationPolicy: Owner
deletionPolicy: Retain
name: netmaker-secrets
name: peertube-secret

24
peertube/service.yaml Normal file
View File

@@ -0,0 +1,24 @@
# Service for the peertube pod; the ingress uses the 9000 (app) port.
kind: Service
apiVersion: v1
metadata:
  name: peertube
spec:
  selector:
    app: peertube
  ports:
  # NOTE(review): the Deployment declares no port 25, and the 80/443 listeners
  # are unconfirmed — verify these entries are needed.
  - protocol: TCP
    port: 80
    targetPort: 80
    name: http
  - protocol: TCP
    port: 25
    targetPort: 25
    name: smtp
  - protocol: TCP
    port: 9000
    targetPort: 9000
    name: peertube
  # RTMP live-streaming ingest.
  - protocol: TCP
    name: rtmp
    port: 1935
    targetPort: 1935

16
peertube/valkey.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Valkey key-value store provisioned by the hyperspike operator.
# NOTE(review): the peertube Deployment uses a localhost Redis sidecar, not
# this instance — confirm which one is intended to serve PeerTube.
apiVersion: hyperspike.io/v1
kind: Valkey
metadata:
  name: peertube-kv
  labels:
    app.kubernetes.io/instance: peertube
spec:
  anonymousAuth: true
  certIssuerType: ClusterIssuer
  clusterDomain: cluster.local
  clusterPreferredEndpointType: ip
  nodes: 1
  prometheus: false
  replicas: 3
  tls: false
  volumePermissions: true

View File

@@ -1,6 +1,3 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"kubernetes": {
"managerFilePatterns": ["deployment.yaml", "statefulset.yaml", "cron.yaml", "cronjob.yaml"]
}
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

View File

@@ -10,7 +10,6 @@ data:
"infrastructure/db-operators",
"infrastructure/weyma-talos",
"williamp/dubyatp.xyz",
"williamp/yt-dlp-bot",
"williamp/helm-gitea"
"williamp/yt-dlp-bot"
]
}

View File

@@ -27,11 +27,6 @@ spec:
secretKeyRef:
key: github-com-pat
name: renovate-github-com-token
- name: RENOVATE_GIT_PRIVATE_KEY
valueFrom:
secretKeyRef:
key: ssh-key
name: renovate-ssh-key
- name: RENOVATE_AUTODISCOVER
value: 'false'
- name: RENOVATE_BASE_DIR

View File

@@ -1,17 +0,0 @@
# Syncs Renovate's git SSH key from Vault into a cluster Secret
# (consumed as RENOVATE_GIT_PRIVATE_KEY by the Renovate CronJob).
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: renovate-ssh-key
spec:
  refreshInterval: 1h
  secretStoreRef:
    name: weyma-vault
    kind: ClusterSecretStore
  target:
    name: renovate-ssh-key
    creationPolicy: Owner
  data:
  - secretKey: ssh-key
    remoteRef:
      key: renovate
      property: ssh-key

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: vaultwarden
image: vaultwarden/server:1.35.2-alpine
image: vaultwarden/server:1.33.2-alpine
livenessProbe:
exec:
command:

View File

@@ -1,5 +1,5 @@
apiVersion: v2
name: jellyfin
name: wekan
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
@@ -23,6 +23,6 @@ version: 0.1.0
appVersion: "1.0"
dependencies:
- name: jellyfin
version: 2.7.0
repository: https://jellyfin.github.io/jellyfin-helm
- name: wekan
version: 7.94.0
repository: https://wekan.github.io/charts/

View File

@@ -0,0 +1,12 @@
{{- define "wekan.fullname" -}}
{{- /* Standard fullname helper: fullnameOverride wins; otherwise the release
       name, combined with the chart name unless it already contains it.
       Truncated to 63 chars (DNS label limit) with trailing "-" stripped. */}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,12 @@
{{ if .Values.configMapsManaged }}
{{- /* Renders the <fullname>-config ConfigMap from .Values.configMapEnv
       name/value pairs; entries with an empty/absent value are skipped. */}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "wekan.fullname" $ }}-config
data:
{{- range $key := .Values.configMapEnv -}}
{{ if $key.value }}
{{ $key.name | indent 2 }}: {{ $key.value | toString | quote }}
{{- end }}
{{- end }}
{{ end }}

View File

@@ -0,0 +1,31 @@
{{- if .Values.externalSecretsManaged }}
{{- /* Builds an ExternalSecret mapping each entry of
       .Values.externalSecrets.secrets (keyName/property -> secretKeyName)
       into the target Secret; fails fast if the list is missing. */}}
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: {{ include "wekan.fullname" . }}-extsecret
spec:
  data:
  {{- if .Values.externalSecrets.secrets }}
  {{- range $key := .Values.externalSecrets.secrets }}
  {{- if $key.keyName }}
  - secretKey: {{ $key.secretKeyName }}
    remoteRef:
      conversionStrategy: Default
      decodingStrategy: None
      key: {{ $key.keyName }}
      metadataPolicy: None
      property: {{ $key.property }}
  {{- end }}
  {{- end }}
  {{- else }}
  {{- fail "externalSecrets.secrets must be defined when externalSecretsManaged is true" }}
  {{- end }}
  refreshInterval: 1h
  secretStoreRef:
    kind: {{ .Values.externalSecrets.secretStore.kind }}
    name: {{ .Values.externalSecrets.secretStore.name }}
  target:
    creationPolicy: Owner
    deletionPolicy: Retain
    name: {{ .Values.externalSecrets.targetSecretName }}
{{- end }}

63
wekan/values.yaml Normal file
View File

@@ -0,0 +1,63 @@
# Wekan chart values: OAuth2 (Authentik) config via a chart-rendered
# ConfigMap, secrets via the chart's ExternalSecret template.
externalSecretsManaged: true
externalSecrets:
  targetSecretName: wekan-secrets
  secrets:
  - keyName: wekan
    secretKeyName: OAUTH2_CLIENT_ID
    property: client_id
  - keyName: wekan
    secretKeyName: OAUTH2_SECRET
    property: secret
  secretStore:
    kind: ClusterSecretStore
    name: weyma-vault
configMapsManaged: true
# Rendered into wekan-config and injected via extraEnvFrom below.
configMapEnv:
- name: OAUTH2_ENABLED
  value: "true"
- name: OAUTH2_LOGIN_STYLE
  value: redirect
- name: OAUTH2_SERVER_URL
  value: https://auth.dubyatp.xyz
- name: OAUTH2_AUTH_ENDPOINT
  value: /application/o/authorize/
- name: OAUTH2_USERINFO_ENDPOINT
  value: /application/o/userinfo/
- name: OAUTH2_TOKEN_ENDPOINT
  value: /application/o/token/
- name: OAUTH2_ID_MAP
  value: sub
- name: OAUTH2_USERNAME_MAP
  value: email
- name: OAUTH2_FULLNAME_MAP
  value: given_name
- name: OAUTH2_EMAIL_MAP
  value: email
wekan:
  endpoint: wekan.dubyatp.xyz
  root_url: https://wekan.dubyatp.xyz
  # Chart-managed secret disabled; wekan-secrets comes from the
  # ExternalSecret rendered by this chart instead.
  secretManaged: false
  podAnnotations:
    backup.velero.io/backup-volumes: shared-data-volume
  sharedDataFolder:
    accessMode: ReadWriteMany
  extraEnvFrom: |
    - configMapRef:
        name: wekan-config
    - secretRef:
        name: wekan-secrets
  ingress:
    enabled: true
    path: /
    # NOTE(review): lowercase "pathtype" — confirm the wekan chart reads this
    # key rather than "pathType".
    pathtype: ImplementationSpecific
    hosts:
    - wekan.dubyatp.xyz
    tls:
    # NOTE(review): jellyfin values use "cert-dubyatp.xyz" (dotted) — confirm
    # which TLS secret name actually exists.
    - secretName: cert-dubyatp-xyz
      hosts:
      - wekan.dubyatp.xyz
mongodb:
  updateStrategy:
    type: Recreate
  podAnnotations:
    backup.velero.io/backup-volumes: datadir

View File

@@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: yt-dlp-bot
image: 'git.dubyatp.xyz/williamp/yt-dlp-bot:b9088d9'
image: 'git.dubyatp.xyz/williamp/yt-dlp-bot:df8c7e9'
env:
- name: OUT_PATH
value: /data/youtube-vids

View File

@@ -1,98 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: zap2xml-s3config
data:
  # s3cmd configuration mounted at /root (so it becomes /root/.s3cfg) by the
  # zap2xml CronJobs. access_key/secret_key are deliberately blank here;
  # credentials presumably come from the zap2xml-bucket secret env — verify.
  # NOTE(review): s3cmd's host_base is normally a bare host (scheme selected
  # by use_https); confirm the "https://" prefix works with this s3cmd version.
  .s3cfg: |
    [default]
    access_key =
    access_token =
    add_encoding_exts =
    add_headers =
    bucket_location = US
    ca_certs_file =
    cache_file =
    check_ssl_certificate = True
    check_ssl_hostname = True
    cloudfront_host = cloudfront.amazonaws.com
    connection_max_age = 5
    connection_pooling = True
    content_disposition =
    content_type =
    default_mime_type = binary/octet-stream
    delay_updates = False
    delete_after = False
    delete_after_fetch = False
    delete_removed = False
    dry_run = False
    enable_multipart = True
    encoding = UTF-8
    encrypt = False
    expiry_date =
    expiry_days =
    expiry_prefix =
    follow_symlinks = False
    force = False
    get_continue = False
    gpg_command = /usr/bin/gpg
    gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
    gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
    gpg_passphrase =
    guess_mime_type = True
    host_base = https://weyma-s3.infra.dubyatp.xyz
    host_bucket =
    human_readable_sizes = False
    invalidate_default_index_on_cf = False
    invalidate_default_index_root_on_cf = True
    invalidate_on_cf = False
    keep_dirs = False
    kms_key =
    limit = -1
    limitrate = 0
    list_allow_unordered = False
    list_md5 = False
    log_target_prefix =
    long_listing = False
    max_delete = -1
    max_retries = 5
    mime_type =
    multipart_chunk_size_mb = 15
    multipart_copy_chunk_size_mb = 1024
    multipart_max_chunks = 10000
    preserve_attrs = True
    progress_meter = True
    proxy_host =
    proxy_port = 0
    public_url_use_https = False
    put_continue = False
    recursive = False
    recv_chunk = 65536
    reduced_redundancy = False
    requester_pays = False
    restore_days = 1
    restore_priority = Standard
    secret_key =
    send_chunk = 65536
    server_side_encryption = False
    signature_v2 = False
    signurl_use_https = False
    simpledb_host = sdb.amazonaws.com
    skip_destination_validation = False
    skip_existing = False
    socket_timeout = 300
    ssl_client_cert_file =
    ssl_client_key_file =
    stats = False
    stop_on_error = False
    storage_class =
    throttle_max = 100
    upload_id =
    urlencoding_mode = normal
    use_http_expect = False
    use_https = True
    use_mime_magic = True
    verbosity = WARNING
    website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
    website_error =
    website_index = index.html

View File

@@ -1,87 +0,0 @@
# zap2xml guide-data scrapers: each CronJob fetches one lineup every 12 hours
# and uploads the resulting XMLTV file to S3 using the mounted .s3cfg.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: zap2xml-dtv-02191
spec:
  # DirecTV lineup, on the hour.
  schedule: "0 */12 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: zap2xml
            image: git.dubyatp.xyz/williamp/kube-zap2xml:c075fec
            # OBC-generated secret with bucket credentials/endpoint.
            envFrom:
            - secretRef:
                name: zap2xml-bucket
            env:
            - name: LINEUP_ID
              value: USA-DITV506-X
            - name: POSTAL_CODE
              value: "02191"
            - name: TIMESPAN
              value: "120"
            - name: OUTPUT_FILE
              value: /tmp/xmltv.xml
            - name: PUBLIC_FILENAME
              value: xmltv-directv-02191.xml
            - name: S3_URL
              value: s3://zap2xml-c134c9a7-a7a0-4113-997e-78e72ec3f576
            volumeMounts:
            # Mounting at /root makes the ConfigMap's .s3cfg visible to s3cmd.
            - name: s3-config
              mountPath: /root
            - name: temp
              mountPath: /tmp
          restartPolicy: Never
          volumes:
          - name: s3-config
            configMap:
              name: zap2xml-s3config
          - name: temp
            emptyDir:
              sizeLimit: 1Gi
              medium: Memory
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: zap2xml-ota-02191
spec:
  # Over-the-air lineup, offset 30 min from the DTV job.
  schedule: "30 */12 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: zap2xml
            image: git.dubyatp.xyz/williamp/kube-zap2xml:c075fec
            envFrom:
            - secretRef:
                name: zap2xml-bucket
            env:
            - name: LINEUP_ID
              value: USA-OTA02191
            - name: POSTAL_CODE
              value: "02191"
            - name: TIMESPAN
              value: "120"
            - name: OUTPUT_FILE
              value: /tmp/xmltv.xml
            - name: PUBLIC_FILENAME
              value: xmltv-ota-02191.xml
            - name: S3_URL
              value: s3://zap2xml-c134c9a7-a7a0-4113-997e-78e72ec3f576
            volumeMounts:
            - name: s3-config
              mountPath: /root
            - name: temp
              mountPath: /tmp
          restartPolicy: Never
          volumes:
          - name: s3-config
            configMap:
              name: zap2xml-s3config
          - name: temp
            emptyDir:
              sizeLimit: 1Gi
              medium: Memory